Linux 5.13-rc3

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmCqzFgeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGIgQH/3nAV/fYbUCubEQe
 RXUcjMGznIpdHeMiY/hPezObYnpBI3UAi2JwHCvQfoE8ckbx4tq8Xp+TUWebsdaf
 zpDhKXDj2jHha1f5AixHCn1UFxiqOSn3d2muY2Bh1Nhg7iJuzU8xjIMCcOdss+fp
 8e4wqidOHkpWvGJ96CQ5zCNxeXI+/f7VX2IgdJ+RCDwzbqJlIvvXwAkg1KrguUEz
 EPmhpODqjPbVVc/mhtguMLMWl78WKCTBOSHCcYBolatXfm2ojsnX1hXprypWY4Mg
 vKXxF/91AS8InCC08Jw+puz+fXDBx1jtNmFFhDOFTyz/TvwPaKZiWbAeXOZFJA2Z
 Wm4su7g=
 =cqxg
 -----END PGP SIGNATURE-----

Merge v5.13-rc3 into drm-next

drm/i915 is extremely on fire without the below revert from -rc3:

commit 293837b9ac
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Wed May 19 05:55:57 2021 -1000

    Revert "i915: fix remap_io_sg to verify the pgprot"

Backmerge so we don't have too wide a bisect window for anything
that's a more involved workload than booting the driver.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Committed by: Daniel Vetter, 2021-05-27 13:07:47 +02:00
Commit: 5522e9f7b0
272 changed files with 2097 additions and 1343 deletions

@@ -60,7 +60,6 @@ properties:
maxItems: 2
idt,xtal-load-femtofarads:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 9000
maximum: 22760
description: Optional load capacitor for XTAL1 and XTAL2
@@ -84,7 +83,6 @@ patternProperties:
enum: [ 1800000, 2500000, 3300000 ]
idt,slew-percent:
description: The Slew rate control for CMOS single-ended.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 80, 85, 90, 100 ]
required:

@@ -102,7 +102,6 @@ patternProperties:
st,adc-channel-names:
description: List of single-ended channel names.
$ref: /schemas/types.yaml#/definitions/string-array
st,filter-order:
description: |

@@ -38,6 +38,5 @@ properties:
Duration in seconds which the key should be kept pressed for device to
reset automatically. Device with key pressed reset feature can specify
this property.
$ref: /schemas/types.yaml#/definitions/uint32
additionalProperties: true

@@ -92,7 +92,6 @@ properties:
this interconnect to send RPMh commands.
qcom,bcm-voter-names:
$ref: /schemas/types.yaml#/definitions/string-array
description: |
Names for each of the qcom,bcm-voters specified.

@@ -4,8 +4,8 @@ This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
In these SoCs it's possible to control LEDs both as GPIOs or by hardware.
However, on some devices there are Serial LEDs (LEDs connected to a 74x164
controller), which can either be controlled by software (exporting the 74x164
as spi-gpio. See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
by hardware using this driver.
as spi-gpio. See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml),
or by hardware using this driver.
Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and
exporting the 74x164 as spi-gpio prevents those LEDs to be hardware
controlled, so the only chance to keep them working is by using this driver.

@@ -3,7 +3,7 @@ LEDs connected to Broadcom BCM6358 controller
This controller is present on BCM6358 and BCM6368.
In these SoCs there are Serial LEDs (LEDs connected to a 74x164 controller),
which can either be controlled by software (exporting the 74x164 as spi-gpio.
See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml), or
by hardware using this driver.
Required properties:

@@ -99,32 +99,26 @@ properties:
Indicates that the channel acts as primary among the bonded channels.
port:
type: object
$ref: /schemas/graph.yaml#/properties/port
unevaluatedProperties: false
description:
Child port node corresponding to the data input, in accordance with the
video interface bindings defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
The port node must contain at least one endpoint.
Child port node corresponding to the data input. The port node must
contain at least one endpoint.
properties:
endpoint:
type: object
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
remote-endpoint:
description:
A phandle to the remote tuner endpoint subnode in remote node
port.
sync-active:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1]
description:
Indicates sync signal polarity, 0/1 for low/high respectively.
This property maps to SYNCAC bit in the hardware manual. The
default is 1 (active high).
additionalProperties: false
required:
- compatible
- reg

@@ -105,7 +105,6 @@ properties:
- description: Whether the IPA clock is enabled (if valid)
qcom,smem-state-names:
$ref: /schemas/types.yaml#/definitions/string-array
description: The names of the state bits used for SMP2P output
items:
- const: ipa-clock-enabled-valid

@@ -9,7 +9,6 @@ Required properties:
"mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
"mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
"mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
"mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
- reg: Should contain registers location and length
= Data cells =

@@ -118,7 +118,7 @@ patternProperties:
description:
Specifies the Spread Spectrum Clocking mode used. It can be NO_SSC,
EXTERNAL_SSC or INTERNAL_SSC.
Refer include/dt-bindings/phy/phy-cadence-torrent.h for the constants to be used.
Refer include/dt-bindings/phy/phy-cadence.h for the constants to be used.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2]
default: 0

@@ -20,7 +20,7 @@ properties:
maxItems: 1
phys:
$ref: /schemas/types.yaml#/definitions/phandle
maxItems: 1
description: phandle to the USB phy
monitored-battery:

@@ -49,7 +49,7 @@ properties:
maxItems: 1
memory-region:
$ref: /schemas/types.yaml#/definitions/phandle
maxItems: 1
description:
phandle to a node describing reserved memory (System RAM memory)
The M core can't access all the DDR memory space on some platform,

@@ -109,6 +109,16 @@ auxiliary vector.
scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
ptrace
------
When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
the system call type that can be used to distinguish between sc and scv 0
system calls, and the different register conventions can be accounted for.
If the value of (pt_regs.trap & 0xfff0) is 0xc00 then the system call was
performed with the sc instruction, if it is 0x3000 then the system call was
performed with the scv 0 instruction.
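
A minimal sketch of the check described above (editor's illustration, not part of this diff; the helper name is hypothetical, and a powerpc struct pt_regs for a tracee stopped under PTRACE_SYSCALL is assumed):

/* Hypothetical helper: classify how a traced system call was entered. */
static int syscall_was_scv(const struct pt_regs *regs)
{
	switch (regs->trap & 0xfff0) {
	case 0xc00:	/* entered via the sc instruction */
		return 0;
	case 0x3000:	/* entered via the scv 0 instruction */
		return 1;
	default:	/* not stopped at a system call */
		return -1;
	}
}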
vsyscall
========

@@ -1618,8 +1618,8 @@ F: Documentation/devicetree/bindings/sound/amlogic*
F: sound/soc/meson/
ARM/Amlogic Meson SoC support
M: Neil Armstrong <narmstrong@baylibre.com>
M: Kevin Hilman <khilman@baylibre.com>
R: Neil Armstrong <narmstrong@baylibre.com>
R: Jerome Brunet <jbrunet@baylibre.com>
R: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -12195,6 +12195,7 @@ F: drivers/platform/surface/surfacepro3_button.c
MICROSOFT SURFACE SYSTEM AGGREGATOR SUBSYSTEM
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://github.com/linux-surface/surface-aggregator-module
C: irc://chat.freenode.net/##linux-surface
@@ -14749,7 +14750,6 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
F: drivers/net/wireless/intersil/prism54/
PROC FILESYSTEM
R: Alexey Dobriyan <adobriyan@gmail.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Frozen Wasteland
# *DOCUMENTATION*

@@ -482,7 +482,7 @@
550 common process_madvise sys_process_madvise
551 common epoll_pwait2 sys_epoll_pwait2
552 common mount_setattr sys_mount_setattr
553 common quotactl_path sys_quotactl_path
# 553 reserved for quotactl_path
554 common landlock_create_ruleset sys_landlock_create_ruleset
555 common landlock_add_rule sys_landlock_add_rule
556 common landlock_restrict_self sys_landlock_restrict_self

@@ -10,6 +10,7 @@ config ARCH_WPCM450
bool "Support for WPCM450 BMC (Hermon)"
depends on ARCH_MULTI_V5
select CPU_ARM926T
select WPCM450_AIC
select NPCM7XX_TIMER
help
General support for WPCM450 BMC (Hermon).

@@ -121,8 +121,13 @@ static int cplds_probe(struct platform_device *pdev)
return fpga->irq;
base_irq = platform_get_irq(pdev, 1);
if (base_irq < 0)
if (base_irq < 0) {
base_irq = 0;
} else {
ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq, CPLDS_NB_IRQ, 0);
if (ret < 0)
return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fpga->base = devm_ioremap_resource(&pdev->dev, res);

@@ -456,7 +456,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -14,7 +14,6 @@
ports {
port@0 {
reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
@@ -29,7 +28,6 @@
ports {
port@0 {
reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;

@@ -2573,6 +2573,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2628,6 +2632,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -2419,6 +2419,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2474,6 +2478,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -33,7 +33,7 @@
status = "okay";
ports {
port {
port@0 {
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;

@@ -1823,6 +1823,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -2709,6 +2709,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2764,6 +2768,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -192,6 +192,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -3097,6 +3097,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3152,6 +3156,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3191,6 +3199,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -2761,6 +2761,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2816,6 +2820,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -2499,6 +2499,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2554,6 +2558,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -2575,6 +2575,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2630,6 +2634,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -1106,6 +1106,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -1439,6 +1439,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -1478,6 +1482,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -298,8 +298,6 @@
ports {
port@0 {
reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;

@@ -1970,6 +1970,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;

@@ -349,7 +349,6 @@
ports {
port@0 {
reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1>;
@@ -364,8 +363,6 @@
ports {
port@0 {
reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2 3 4>;

@@ -893,8 +893,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
#define __NR_quotactl_path 443
__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445

@@ -363,7 +363,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -623,7 +623,8 @@ static inline void siginfo_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
/* _sigfault._perf */
BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c);

@@ -442,7 +442,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -448,7 +448,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -381,7 +381,7 @@
440 n32 process_madvise sys_process_madvise
441 n32 epoll_pwait2 compat_sys_epoll_pwait2
442 n32 mount_setattr sys_mount_setattr
443 n32 quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 n32 landlock_create_ruleset sys_landlock_create_ruleset
445 n32 landlock_add_rule sys_landlock_add_rule
446 n32 landlock_restrict_self sys_landlock_restrict_self

@@ -357,7 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
443 n64 quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self

@@ -430,7 +430,7 @@
440 o32 process_madvise sys_process_madvise
441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 o32 mount_setattr sys_mount_setattr
443 o32 quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 o32 landlock_create_ruleset sys_landlock_create_ruleset
445 o32 landlock_add_rule sys_landlock_add_rule
446 o32 landlock_restrict_self sys_landlock_restrict_self

@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#define mb() asm volatile ("l.msync" ::: "memory")
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */

@@ -278,6 +278,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
of_node_put(cpu);
}
void __init setup_arch(char **cmdline_p)

@@ -75,7 +75,6 @@ static void __init map_ram(void)
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
struct memblock_region *region;
v = PAGE_OFFSET;
@@ -121,7 +120,7 @@ static void __init map_ram(void)
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
region->base, region->base + region->size);
start, end);
}
}
@@ -129,7 +128,6 @@ void __init paging_init(void)
{
extern void tlb_init(void);
unsigned long end;
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -145,8 +143,6 @@
*/
current_pgd[smp_processor_id()] = init_mm.pgd;
end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
map_ram();
zone_sizes_init();

@@ -440,7 +440,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
@@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
return !(regs->ccr & 0x10000000);
}
static inline long regs_return_value(struct pt_regs *regs)
{
if (is_syscall_success(regs))
return regs->gpr[3];
else
return -regs->gpr[3];
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gpr[3] = rc;
}
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
@@ -235,6 +217,31 @@ static __always_inline void set_trap_norestart(struct pt_regs *regs)
regs->trap |= 0x1;
}
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
if (trap_is_scv(regs))
return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
else
return !(regs->ccr & 0x10000000);
}
static inline long regs_return_value(struct pt_regs *regs)
{
if (trap_is_scv(regs))
return regs->gpr[3];
if (is_syscall_success(regs))
return regs->gpr[3];
else
return -regs->gpr[3];
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gpr[3] = rc;
}
#define arch_has_single_step() (1)
#define arch_has_block_step() (true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT

@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
/*
* If the system call failed,
* regs->gpr[3] contains a positive ERRORCODE.
*/
return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
if (trap_is_scv(regs)) {
unsigned long error = regs->gpr[3];
return IS_ERR_VALUE(error) ? error : 0;
} else {
/*
* If the system call failed,
* regs->gpr[3] contains a positive ERRORCODE.
*/
return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
}
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
/*
* In the general case it's not obvious that we must deal with CCR
* here, as the syscall exit path will also do that for us. However
* there are some places, eg. the signal code, which check ccr to
* decide if the value in r3 is actually an error.
*/
if (error) {
regs->ccr |= 0x10000000L;
regs->gpr[3] = error;
if (trap_is_scv(regs)) {
regs->gpr[3] = (long) error ?: val;
} else {
regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
/*
* In the general case it's not obvious that we must deal with
* CCR here, as the syscall exit path will also do that for us.
* However there are some places, eg. the signal code, which
* check ccr to decide if the value in r3 is actually an error.
*/
if (error) {
regs->ccr |= 0x10000000L;
regs->gpr[3] = error;
} else {
regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
}
}
}

@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
early_ioremap_setup();
/* Initialize the hash table or TLB handling */
early_init_mmu();
early_ioremap_setup();
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance

@@ -522,7 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self

@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -488,7 +488,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -178,11 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -202,7 +197,12 @@ ifdef CONFIG_RETPOLINE
endif
endif
KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none

@@ -447,7 +447,7 @@
440 i386 process_madvise sys_process_madvise
441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 i386 mount_setattr sys_mount_setattr
443 i386 quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 i386 landlock_create_ruleset sys_landlock_create_ruleset
445 i386 landlock_add_rule sys_landlock_add_rule
446 i386 landlock_restrict_self sys_landlock_restrict_self

@@ -364,7 +364,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -396,10 +396,12 @@ int x86_reserve_hardware(void)
if (!atomic_inc_not_zero(&pmc_refcount)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&pmc_refcount) == 0) {
if (!reserve_pmc_hardware())
if (!reserve_pmc_hardware()) {
err = -EBUSY;
else
} else {
reserve_ds_buffers();
reserve_lbr_buffers();
}
}
if (!err)
atomic_inc(&pmc_refcount);

@@ -6253,7 +6253,7 @@ __init int intel_pmu_init(void)
* Check all LBT MSR here.
* Disable LBR access if any LBR MSRs can not be accessed.
*/
if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&

@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
@@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
kmem_cache && !cpuc->lbr_xsave &&
(cpuc->lbr_users != cpuc->lbr_pebs_users))
cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
}
void release_lbr_buffers(void)
@@ -722,6 +716,26 @@ void release_lbr_buffers(void)
}
}
void reserve_lbr_buffers(void)
{
struct kmem_cache *kmem_cache;
struct cpu_hw_events *cpuc;
int cpu;
if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
return;
for_each_possible_cpu(cpu) {
cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
if (!kmem_cache || cpuc->lbr_xsave)
continue;
cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
cpu_to_node(cpu));
}
}
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

@@ -1244,6 +1244,8 @@ void reserve_ds_buffers(void);
void release_lbr_buffers(void);
void reserve_lbr_buffers(void);
extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;
@@ -1393,6 +1395,10 @@ static inline void release_lbr_buffers(void)
{
}
static inline void reserve_lbr_buffers(void)
{
}
static inline int intel_pmu_init(void)
{
return 0;

@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
ghcb->save.sw_exit_code = 0;
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

@@ -203,8 +203,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
if (unlikely(data->ghcb_active)) {
/* GHCB is already in use - save its contents */
if (unlikely(data->backup_ghcb_active))
return NULL;
if (unlikely(data->backup_ghcb_active)) {
/*
* Backup-GHCB is also already in use. There is no way
* to continue here so just kill the machine. To make
* panic() work, mark GHCBs inactive so that messages
* can be printed out.
*/
data->ghcb_active = false;
data->backup_ghcb_active = false;
panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
}
/* Mark backup_ghcb active before writing to it */
data->backup_ghcb_active = true;
@@ -221,24 +231,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
return ghcb;
}
static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
if (state->ghcb) {
/* Restore GHCB from Backup */
*ghcb = *state->ghcb;
data->backup_ghcb_active = false;
state->ghcb = NULL;
} else {
data->ghcb_active = false;
}
}
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
@@ -323,31 +315,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
memcpy(dst, buf, size);
return ES_OK;
}
/*
* This function uses __put_user() independent of whether kernel or user
* memory is accessed. This works fine because __put_user() does no
* sanity checks of the pointer being accessed. All that it does is
* to report when the access failed.
*
* Also, this function runs in atomic context, so __put_user() is not
* allowed to sleep. The page-fault handler detects that it is running
* in atomic context and will not try to take mmap_sem and handle the
* fault, so additional pagefault_enable()/disable() calls are not
* needed.
*
* The access can't be done via copy_to_user() here because
* vc_write_mem() must not use string instructions to access unsafe
* memory. The reason is that MOVS is emulated by the #VC handler by
* splitting the move up into a read and a write and taking a nested #VC
* exception on whatever of them is the MMIO access. Using string
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
memcpy(&d1, buf, 1);
if (put_user(d1, target))
if (__put_user(d1, target))
goto fault;
break;
case 2:
memcpy(&d2, buf, 2);
if (put_user(d2, target))
if (__put_user(d2, target))
goto fault;
break;
case 4:
memcpy(&d4, buf, 4);
if (put_user(d4, target))
if (__put_user(d4, target))
goto fault;
break;
case 8:
memcpy(&d8, buf, 8);
if (put_user(d8, target))
if (__put_user(d8, target))
goto fault;
break;
default:
@@ -378,30 +383,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
memcpy(buf, src, size);
return ES_OK;
}
/*
* This function uses __get_user() independent of whether kernel or user
* memory is accessed. This works fine because __get_user() does no
* sanity checks of the pointer being accessed. All that it does is
* to report when the access failed.
*
* Also, this function runs in atomic context, so __get_user() is not
* allowed to sleep. The page-fault handler detects that it is running
* in atomic context and will not try to take mmap_sem and handle the
* fault, so additional pagefault_enable()/disable() calls are not
* needed.
*
* The access can't be done via copy_from_user() here because
* vc_read_mem() must not use string instructions to access unsafe
* memory. The reason is that MOVS is emulated by the #VC handler by
* splitting the move up into a read and a write and taking a nested #VC
* exception on whatever of them is the MMIO access. Using string
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
if (get_user(d1, s))
if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
if (get_user(d2, s))
if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
if (get_user(d4, s))
if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
if (get_user(d8, s))
if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
@@ -461,6 +479,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
if (state->ghcb) {
/* Restore GHCB from Backup */
*ghcb = *state->ghcb;
data->backup_ghcb_active = false;
state->ghcb = NULL;
} else {
/*
* Invalidate the GHCB so a VMGEXIT instruction issued
* from userspace won't appear to be valid.
*/
vc_ghcb_invalidate(ghcb);
data->ghcb_active = false;
}
}
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
@@ -1255,6 +1296,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
case X86_TRAP_UD:
exc_invalid_op(ctxt->regs);
break;
case X86_TRAP_PF:
write_cr2(ctxt->fi.cr2);
exc_page_fault(ctxt->regs, error_code);
break;
case X86_TRAP_AC:
exc_alignment_check(ctxt->regs, error_code);
break;
@@ -1284,7 +1329,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
*/
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
{
struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
irqentry_state_t irq_state;
struct ghcb_state state;
struct es_em_ctxt ctxt;
@@ -1310,16 +1354,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
*/
ghcb = sev_es_get_ghcb(&state);
if (!ghcb) {
/*
* Mark GHCBs inactive so that panic() is able to print the
* message.
*/
data->ghcb_active = false;
data->backup_ghcb_active = false;
panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
}
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);

@@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
@@ -138,8 +141,10 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14);
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));

@@ -1273,16 +1273,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
/* Work out if we support NX */
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
xen_setup_gdt(0);
/* Work out if we support NX */
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
/* Determine virtual and physical address sizes */
get_cpu_address_sizes(&boot_cpu_data);

@@ -413,7 +413,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
443 common quotactl_path sys_quotactl_path
# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self

@@ -29,8 +29,6 @@
static struct kobject *block_depr;
DECLARE_RWSEM(bdev_lookup_sem);
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);
@@ -609,13 +607,8 @@ void del_gendisk(struct gendisk *disk)
blk_integrity_del(disk);
disk_del_events(disk);
/*
* Block lookups of the disk until all bdevs are unhashed and the
* disk is marked as dead (GENHD_FL_UP cleared).
*/
down_write(&bdev_lookup_sem);
mutex_lock(&disk->part0->bd_mutex);
disk->flags &= ~GENHD_FL_UP;
blk_drop_partitions(disk);
mutex_unlock(&disk->part0->bd_mutex);
@@ -629,8 +622,6 @@ void del_gendisk(struct gendisk *disk)
remove_inode_hash(disk->part0->bd_inode);
set_capacity(disk, 0);
disk->flags &= ~GENHD_FL_UP;
up_write(&bdev_lookup_sem);
if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");

@@ -4918,7 +4918,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
uint32_t enable;
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
ret = -EINVAL;
ret = -EFAULT;
goto err;
}
binder_inner_proc_lock(proc);

@@ -744,6 +744,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
static int probe_gdrom(struct platform_device *devptr)
{
int err;
/*
* Ensure our "one" device is initialized properly in case of previous
* usages of it
*/
memset(&gd, 0, sizeof(gd));
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
pr_warn("ATA Probe for GDROM failed\n");
@@ -830,6 +837,8 @@ static int remove_gdrom(struct platform_device *devptr)
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
unregister_cdrom(gd.cd_info);
kfree(gd.cd_info);
kfree(gd.toc);
return 0;
}
@@ -845,7 +854,7 @@ static struct platform_driver gdrom_driver = {
static int __init init_gdrom(void)
{
int rc;
gd.toc = NULL;
rc = platform_driver_register(&gdrom_driver);
if (rc)
return rc;
@@ -861,8 +870,6 @@
{
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
kfree(gd.toc);
kfree(gd.cd_info);
}
module_init(init_gdrom);

@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
HPET_RANGE_SIZE);
if (!hdp->hd_address)
return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);

@@ -442,7 +442,6 @@ static int nitrox_probe(struct pci_dev *pdev,
err = pci_request_mem_regions(pdev, nitrox_driver_name);
if (err) {
pci_disable_device(pdev);
dev_err(&pdev->dev, "Failed to request mem regions!\n");
return err;
}
pci_set_master(pdev);

@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
ret = dma_buf_pin(attach);
ret = dmabuf->ops->pin(attach);
if (ret)
goto err_unlock;
}
@@ -786,7 +786,7 @@ err_attach:
err_unpin:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_buf_unpin(attach);
dmabuf->ops->unpin(attach);
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
__unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_buf_unpin(attach);
dmabuf->ops->unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
}
}
@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
r = dma_buf_pin(attach);
r = attach->dmabuf->ops->pin(attach);
if (r)
return ERR_PTR(r);
}
@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
attach->dmabuf->ops->unpin(attach);
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;

@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
return platform_driver_register(&hidma_mgmt_driver);
/*
* We do not check for return value here, as it is assumed that
* platform_driver_register must not fail. The reason for this is that
* the (potential) hidma_mgmt_of_populate_channels calls above are not
* cleaned up if it does fail, and to do this work is quite
* complicated. In particular, various calls of of_address_to_resource,
* of_irq_to_resource, platform_device_register_full, of_dma_configure,
* and of_msi_configure which then call other functions and so on, must
* be cleaned up - this is not a trivial exercise.
*
* Currently, this module is not intended to be unloaded, and there is
* no module_exit function defined which does the needed cleanup. For
* this reason, we have to assume success here.
*/
platform_driver_register(&hidma_mgmt_driver);
return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");

@@ -79,8 +79,6 @@ struct scmi_protocol_events {
int scmi_notification_init(struct scmi_handle *handle);
void scmi_notification_exit(struct scmi_handle *handle);
struct scmi_protocol_handle;
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
const struct scmi_protocol_handle *ph,
const struct scmi_protocol_events *ee);

@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &rate, sizeof(rate));
if (ret)
return 0;
return ret ? ret : le32_to_cpu(rate);
return le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)

@@ -278,6 +278,7 @@ static const struct of_device_id cdns_of_ids[] = {
{ .compatible = "cdns,gpio-r1p02" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_of_ids);
static struct platform_driver cdns_gpio_driver = {
.driver = {

@@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
static int tegra186_irq_set_affinity(struct irq_data *data,
const struct cpumask *dest,
bool force)
{
if (data->parent_data)
return irq_chip_set_affinity_parent(data, dest, force);
return -EINVAL;
}
static void tegra186_gpio_irq(struct irq_desc *desc)
{
struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
gpio->intc.irq_set_wake = tegra186_irq_set_wake;
gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;

@@ -542,7 +542,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
}
/**
* xgpio_of_probe - Probe method for the GPIO device.
* xgpio_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
* Return:

@@ -815,10 +815,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->addr = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->addr)) {
dev_err(dev, "ioremap failed\n");
if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr);
}
ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
if (ret < 0)

@@ -1786,10 +1786,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->reg_base)) {
dev_err(dev, "failed to remap io region\n");
if (IS_ERR(dsi->reg_base))
return PTR_ERR(dsi->reg_base);
}
dsi->phy = devm_phy_get(dev, "dsim");
if (IS_ERR(dsi->phy)) {

@@ -723,7 +723,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
}
/**
* shadow_protect_win() - disable updating values from shadow registers at vsync
* fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: local driver data
* @win: window to protect registers for

@@ -102,7 +102,6 @@ config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on 64BIT
depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
default n
help
Choose this option if you want to enable Intel GVT-g graphics

@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
GEM_BUG_ON(!list_empty(&obj->mm.link));
atomic_inc(&obj->mm.shrink_pin);
shrinkable = false;
}

@@ -46,118 +46,6 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
static struct intel_vgpu_type *
intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
{
if (WARN_ON(type_group_id >= gvt->num_types))
return NULL;
return &gvt->types[type_group_id];
}
static ssize_t available_instances_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr,
char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
if (!type)
num = 0;
else
num = type->avail_instance;
return sprintf(buf, "%u\n", num);
}
static ssize_t device_api_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static ssize_t description_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr, char *buf)
{
struct intel_vgpu_type *type;
void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
if (!type)
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
BYTES_TO_MB(type->low_gm_size),
BYTES_TO_MB(type->high_gm_size),
type->fence, vgpu_edid_str(type->resolution),
type->weight);
}
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
static struct attribute *gvt_type_attrs[] = {
&mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
};
static struct attribute_group *gvt_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
*intel_vgpu_type_groups = gvt_vgpu_type_groups;
return true;
}
static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
type = &gvt->types[i];
group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
if (WARN_ON(!group))
goto unwind;
group->name = type->name;
group->attrs = gvt_type_attrs;
gvt_vgpu_type_groups[i] = group;
}
return 0;
unwind:
for (j = 0; j < i; j++) {
group = gvt_vgpu_type_groups[j];
kfree(group);
}
return -ENOMEM;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
int i;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
group = gvt_vgpu_type_groups[i];
gvt_vgpu_type_groups[i] = NULL;
kfree(group);
}
}
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -169,8 +57,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
.get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
@@ -274,7 +160,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
intel_gvt_debugfs_clean(gvt);
@@ -363,12 +248,6 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
if (ret)
goto out_clean_thread;
ret = intel_gvt_init_vgpu_type_groups(gvt);
if (ret) {
gvt_err("failed to init vgpu type groups: %d\n", ret);
goto out_clean_types;
}
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@@ -454,7 +333,8 @@ EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);

@@ -574,9 +574,6 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
struct intel_vgpu_type *(*gvt_find_vgpu_type)(
struct intel_gvt *gvt, unsigned int type_group_id);
bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,

@@ -49,7 +49,7 @@ enum hypervisor_type {
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);

@@ -144,6 +144,104 @@ static inline bool handle_valid(unsigned long handle)
return !!(handle & ~0xff);
}
static ssize_t available_instances_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr,
char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
type = &gvt->types[mtype_get_type_group_id(mtype)];
if (!type)
num = 0;
else
num = type->avail_instance;
return sprintf(buf, "%u\n", num);
}
static ssize_t device_api_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static ssize_t description_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr, char *buf)
{
struct intel_vgpu_type *type;
struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
type = &gvt->types[mtype_get_type_group_id(mtype)];
if (!type)
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
BYTES_TO_MB(type->low_gm_size),
BYTES_TO_MB(type->high_gm_size),
type->fence, vgpu_edid_str(type->resolution),
type->weight);
}
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
static struct attribute *gvt_type_attrs[] = {
&mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
};
static struct attribute_group *gvt_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
type = &gvt->types[i];
group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
if (!group)
goto unwind;
group->name = type->name;
group->attrs = gvt_type_attrs;
gvt_vgpu_type_groups[i] = group;
}
return 0;
unwind:
for (j = 0; j < i; j++) {
group = gvt_vgpu_type_groups[j];
kfree(group);
}
return -ENOMEM;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
int i;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
group = gvt_vgpu_type_groups[i];
gvt_vgpu_type_groups[i] = NULL;
kfree(group);
}
}
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -694,14 +792,13 @@ static int intel_vgpu_create(struct mdev_device *mdev)
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
struct intel_gvt *gvt;
int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
mdev_get_type_group_id(mdev));
type = &gvt->types[mdev_get_type_group_id(mdev)];
if (!type) {
ret = -EINVAL;
goto out;
@@ -1667,19 +1764,26 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
struct attribute_group **kvm_vgpu_type_groups;
int ret;
ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
if (ret)
return ret;
intel_gvt_ops = ops;
if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
return -EFAULT;
intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
return mdev_register_device(dev, &intel_vgpu_ops);
ret = mdev_register_device(dev, &intel_vgpu_ops);
if (ret)
intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
return ret;
}
static void kvmgt_host_exit(struct device *dev)
static void kvmgt_host_exit(struct device *dev, void *gvt)
{
mdev_unregister_device(dev);
intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
}
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)

@@ -63,13 +63,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
intel_gvt_host.mpt->host_exit(dev);
intel_gvt_host.mpt->host_exit(dev, gvt);
}
/**

@@ -999,12 +999,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv;
if (i915_gem_object_has_pages(obj)) {
struct list_head *list;
unsigned long flags;
if (i915_gem_object_is_shrinkable(obj)) {
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
spin_lock_irqsave(&i915->mm.obj_lock, flags);
if (!list_empty(&obj->mm.link)) {
struct list_head *list;
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
@@ -1012,8 +1011,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
list = &i915->mm.shrink_list;
list_move_tail(&obj->mm.link, list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
/* if the object is no longer attached, discard its backing storage */

@@ -28,10 +28,46 @@
#include "i915_drv.h"
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
struct remap_pfn {
struct mm_struct *mm;
unsigned long pfn;
pgprot_t prot;
struct sgt_iter sgt;
resource_size_t iobase;
};
#define use_dma(io) ((io) != -1)
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
if (use_dma(r->iobase))
return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
else
return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
if (GEM_WARN_ON(!r->sgt.sgp))
return -EINVAL;
/* Special PTE are not associated with any struct page */
set_pte_at(r->mm, addr, pte,
pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
r->pfn++; /* track insertions in case we need to unwind later */
r->sgt.curr += PAGE_SIZE;
if (r->sgt.curr >= r->sgt.max)
r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
return 0;
}
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
/**
* remap_io_sg - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -46,8 +82,13 @@ int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
{
unsigned long pfn, len, remapped = 0;
int err = 0;
struct remap_pfn r = {
.mm = vma->vm_mm,
.prot = vma->vm_page_prot,
.sgt = __sgt_iter(sgl, use_dma(iobase)),
.iobase = iobase,
};
int err;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
@@ -55,25 +96,11 @@ int remap_io_sg(struct vm_area_struct *vma,
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
do {
if (use_dma(iobase)) {
if (!sg_dma_len(sgl))
break;
pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
len = sg_dma_len(sgl);
} else {
pfn = page_to_pfn(sg_page(sgl));
len = sgl->length;
}
err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
if (unlikely(err)) {
zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
return err;
}
err = remap_pfn_range(vma, addr + remapped, pfn, len,
vma->vm_page_prot);
if (err)
break;
remapped += len;
} while ((sgl = __sg_next(sgl)));
if (err)
zap_vma_ptes(vma, addr, remapped);
return err;
return 0;
}

@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm80_data *data;
int rv;
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
lm80_init_client(client);
/* A few vars need to be filled upon startup */
rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
if (rv < 0)
return rv;
data->fan[f_min][0] = rv;
rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
if (rv < 0)
return rv;
data->fan[f_min][1] = rv;
data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm80_groups);

@@ -473,6 +473,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
list_del(&id_priv->list);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
id_priv->id.device = NULL;
if (id_priv->id.route.addr.dev_addr.sgid_attr) {
rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
@@ -1860,6 +1861,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
iw_destroy_cm_id(id_priv->cm_id.iw);
}
cma_leave_mc_groups(id_priv);
rdma_restrack_del(&id_priv->res);
cma_release_dev(id_priv);
}
@@ -1873,7 +1875,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
put_net(id_priv->id.route.addr.dev_addr.net);
rdma_restrack_del(&id_priv->res);
kfree(id_priv);
}
@@ -3774,7 +3775,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
}
id_priv->backlog = backlog;
if (id->device) {
if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)

@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
return ret;
uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
if (!uapi_object)
return -EINVAL;
if (IS_ERR(uapi_object))
return PTR_ERR(uapi_object);
handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
out_len, &total);
@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
if (ret)
return ret;
if (!user_entry_size)
return -EINVAL;
max_entries = uverbs_attr_ptr_get_array_size(
attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
user_entry_size);

@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
case UVERBS_OBJECT_QP:
{
struct mlx5_ib_qp *qp = to_mqp(uobj->object);
enum ib_qp_type qp_type = qp->ibqp.qp_type;
if (qp_type == IB_QPT_RAW_PACKET ||
if (qp->type == IB_QPT_RAW_PACKET ||
(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
sq->tisn) == obj_id);
}
if (qp_type == MLX5_IB_QPT_DCT)
if (qp->type == MLX5_IB_QPT_DCT)
return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
qp->dct.mdct.mqp.qpn) == obj_id;
return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
qp->ibqp.qp_num) == obj_id;
}

@@ -217,6 +217,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
if (err)
return err;
if (op >= BITS_PER_TYPE(u32))
return -EOPNOTSUPP;
if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
return -EOPNOTSUPP;

@@ -4419,6 +4419,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
if (bound) {
rdma_roce_rescan_device(&dev->ib_dev);
mpi->ibdev->ib_active = true;
break;
}
}

@@ -346,13 +346,15 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mr_obj, NULL);
if (ret)
if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
}
if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
return COMPST_COMP_ACK;
else
return COMPST_UPDATE_COMP;
return COMPST_UPDATE_COMP;
}
static inline enum comp_state do_atomic(struct rxe_qp *qp,
@@ -366,10 +368,12 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mr_obj, NULL);
if (ret)
if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
else
return COMPST_COMP_ACK;
}
return COMPST_COMP_ACK;
}
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,

@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
qp->sq.queue = NULL;
return err;
}
@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
qp->rq.queue = NULL;
return err;
}
}
@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
err1:
qp->pd = NULL;
qp->rcq = NULL;
qp->scq = NULL;
qp->srq = NULL;
if (srq)
rxe_drop_ref(srq);
rxe_drop_ref(scq);

@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
size_t length;
@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = -EINVAL;
goto err_out;
}
scq = to_siw_cq(attrs->send_cq);
rcq = to_siw_cq(attrs->recv_cq);
if (!scq || (!rcq && !attrs->srq)) {
if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL;
goto err_out;
@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
else {
/* Zero sized SQ is not supported */
rv = -EINVAL;
goto err_out;
goto err_out_xa;
}
if (num_rqe)
num_rqe = roundup_pow_of_two(num_rqe);
@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
}
}
qp->pd = pd;
qp->scq = scq;
qp->rcq = rcq;
qp->scq = to_siw_cq(attrs->send_cq);
qp->rcq = to_siw_cq(attrs->recv_cq);
if (attrs->srq) {
/*

@@ -596,7 +596,7 @@ config IRQ_IDT3243X
config APPLE_AIC
bool "Apple Interrupt Controller (AIC)"
depends on ARM64
default ARCH_APPLE
depends on ARCH_APPLE || COMPILE_TEST
help
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.

Some files were not shown because too many files have changed in this diff.