Linux 5.8-rc7

Merge 5.8-rc7 into usb-next

We want the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in commit c97793089b.

.mailmap | 3
@@ -198,6 +198,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
+Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
+Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
 Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
 Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com>
 Mitesh shah <mshah@teja.com>
@@ -16,7 +16,16 @@ Description:    Allow the root user to disable/enable in runtime the clock
                 gating mechanism in Gaudi. Due to how Gaudi is built, the
                 clock gating needs to be disabled in order to access the
                 registers of the TPC and MME engines. This is sometimes needed
-                during debug by the user and hence the user needs this option
+                during debug by the user and hence the user needs this option.
+                The user can supply a bitmask value, each bit represents
+                a different engine to disable/enable its clock gating feature.
+                The bitmask is composed of 20 bits:
+                0  -  7 : DMA channels
+                8  - 11 : MME engines
+                12 - 19 : TPC engines
+                The bit's location of a specific engine can be determined
+                using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values
+                are defined in uapi habanalabs.h file in enum gaudi_engine_id

 What:           /sys/kernel/debug/habanalabs/hl<n>/command_buffers
 Date:           Jan 2019
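Note (not part of the diff): the bitmask composition described above is easy to get wrong by hand. A minimal user-space sketch follows; the GAUDI_ENGINE_ID_* values used here are assumptions consistent with the documented bit ranges — the authoritative values live in the uapi habanalabs.h enum gaudi_engine_id.

#include <stdio.h>

/* Assumed values mirroring enum gaudi_engine_id
 * (bits 0-7 DMA channels, 8-11 MME engines, 12-19 TPC engines). */
#define GAUDI_ENGINE_ID_DMA_0  0
#define GAUDI_ENGINE_ID_MME_0  8
#define GAUDI_ENGINE_ID_TPC_0  12

int main(void)
{
	unsigned int mask = 0;

	mask |= 1u << GAUDI_ENGINE_ID_DMA_0;  /* first DMA channel */
	mask |= 1u << GAUDI_ENGINE_ID_MME_0;  /* first MME engine */
	mask |= 1u << GAUDI_ENGINE_ID_TPC_0;  /* first TPC engine */

	printf("0x%05x\n", mask);  /* value to write to .../clk_gate */
	return 0;
}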
@ -378,6 +378,8 @@ examples:
|
||||
- |
|
||||
sound {
|
||||
compatible = "simple-audio-card";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
simple-audio-card,name = "rsnd-ak4643";
|
||||
simple-audio-card,format = "left_j";
|
||||
@ -391,10 +393,12 @@ examples:
|
||||
"ak4642 Playback", "DAI1 Playback";
|
||||
|
||||
dpcmcpu: simple-audio-card,cpu@0 {
|
||||
reg = <0>;
|
||||
sound-dai = <&rcar_sound 0>;
|
||||
};
|
||||
|
||||
simple-audio-card,cpu@1 {
|
||||
reg = <1>;
|
||||
sound-dai = <&rcar_sound 1>;
|
||||
};
|
||||
|
||||
@ -418,6 +422,8 @@ examples:
|
||||
- |
|
||||
sound {
|
||||
compatible = "simple-audio-card";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
simple-audio-card,routing =
|
||||
"pcm3168a Playback", "DAI1 Playback",
|
||||
@ -426,6 +432,7 @@ examples:
|
||||
"pcm3168a Playback", "DAI4 Playback";
|
||||
|
||||
simple-audio-card,dai-link@0 {
|
||||
reg = <0>;
|
||||
format = "left_j";
|
||||
bitclock-master = <&sndcpu0>;
|
||||
frame-master = <&sndcpu0>;
|
||||
@ -439,22 +446,23 @@ examples:
|
||||
};
|
||||
|
||||
simple-audio-card,dai-link@1 {
|
||||
reg = <1>;
|
||||
format = "i2s";
|
||||
bitclock-master = <&sndcpu1>;
|
||||
frame-master = <&sndcpu1>;
|
||||
|
||||
convert-channels = <8>; /* TDM Split */
|
||||
|
||||
sndcpu1: cpu@0 {
|
||||
sndcpu1: cpu0 {
|
||||
sound-dai = <&rcar_sound 1>;
|
||||
};
|
||||
cpu@1 {
|
||||
cpu1 {
|
||||
sound-dai = <&rcar_sound 2>;
|
||||
};
|
||||
cpu@2 {
|
||||
cpu2 {
|
||||
sound-dai = <&rcar_sound 3>;
|
||||
};
|
||||
cpu@3 {
|
||||
cpu3 {
|
||||
sound-dai = <&rcar_sound 4>;
|
||||
};
|
||||
codec {
|
||||
@ -466,6 +474,7 @@ examples:
|
||||
};
|
||||
|
||||
simple-audio-card,dai-link@2 {
|
||||
reg = <2>;
|
||||
format = "i2s";
|
||||
bitclock-master = <&sndcpu2>;
|
||||
frame-master = <&sndcpu2>;
|
||||
|
@@ -23,6 +23,7 @@ PTP hardware clock infrastructure for Linux
   + Ancillary clock features
     - Time stamp external events
     - Period output signals configurable from user space
+    - Low Pass Filter (LPF) access from user space
    - Synchronization of the Linux system time via the PPS subsystem

 PTP hardware clock kernel API
@@ -94,3 +95,14 @@ Supported hardware
     - Auxiliary Slave/Master Mode Snapshot (optional interrupt)
     - Target Time (optional interrupt)
+
+  * Renesas (IDT) ClockMatrix™
+
+    - Up to 4 independent PHC channels
+    - Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2)
+    - Programmable output periodic signals
+    - Programmable inputs can time stamp external triggers
+    - Driver and/or hardware configuration through firmware (idtcm.bin)
+    - LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2))
+    - Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs)
+    - Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional)
@@ -26,7 +26,7 @@ Usage

 1) Device creation & deletion

-    a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847.
+    a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc

        This creates a bareudp tunnel device which tunnels L3 traffic with ethertype
        0x8847 (MPLS traffic). The destination port of the UDP header will be set to
@@ -34,14 +34,21 @@ Usage

     b) ip link delete bareudp0

-2) Device creation with multiple proto mode enabled
+2) Device creation with multiproto mode enabled

-There are two ways to create a bareudp device for MPLS & IP with multiproto mode
-enabled.
+    The multiproto mode allows bareudp tunnels to handle several protocols of the
+    same family. It is currently only available for IP and MPLS. This mode has to
+    be enabled explicitly with the "multiproto" flag.

-    a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto
+    a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto

-    b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls
+       For an IPv4 tunnel the multiproto mode allows the tunnel to also handle
+       IPv6.
+
+    b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto
+
+       For MPLS, the multiproto mode allows the tunnel to handle both unicast
+       and multicast MPLS packets.

 3) Device Usage

MAINTAINERS | 21
@@ -6956,6 +6956,7 @@ M:	Timur Tabi <timur@kernel.org>
 M:	Nicolin Chen <nicoleotsuka@gmail.com>
 M:	Xiubo Li <Xiubo.Lee@gmail.com>
 R:	Fabio Estevam <festevam@gmail.com>
+R:	Shengjiu Wang <shengjiu.wang@gmail.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
@@ -9312,6 +9313,17 @@ F:	Documentation/kbuild/kconfig*
 F:	scripts/Kconfig.include
 F:	scripts/kconfig/

+KCOV
+R:	Dmitry Vyukov <dvyukov@google.com>
+R:	Andrey Konovalov <andreyknvl@google.com>
+L:	kasan-dev@googlegroups.com
+S:	Maintained
+F:	Documentation/dev-tools/kcov.rst
+F:	include/linux/kcov.h
+F:	include/uapi/linux/kcov.h
+F:	kernel/kcov.c
+F:	scripts/Makefile.kcov
+
 KCSAN
 M:	Marco Elver <elver@google.com>
 R:	Dmitry Vyukov <dvyukov@google.com>
@@ -11247,7 +11259,7 @@ S:	Maintained
 F:	drivers/crypto/atmel-ecc.*

 MICROCHIP I2C DRIVER
-M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+M:	Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
 L:	linux-i2c@vger.kernel.org
 S:	Supported
 F:	drivers/i2c/busses/i2c-at91-*.c
@@ -11340,17 +11352,17 @@ F:	drivers/iio/adc/at91-sama5d2_adc.c
 F:	include/dt-bindings/iio/adc/at91-sama5d2_adc.h

 MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+M:	Claudiu Beznea <claudiu.beznea@microchip.com>
 S:	Supported
 F:	drivers/power/reset/at91-sama5d2_shdwc.c

 MICROCHIP SPI DRIVER
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+M:	Tudor Ambarus <tudor.ambarus@microchip.com>
 S:	Supported
 F:	drivers/spi/spi-atmel.*

 MICROCHIP SSC DRIVER
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+M:	Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	drivers/misc/atmel-ssc.c
@@ -14869,6 +14881,7 @@ F:	drivers/s390/block/dasd*
 F:	include/linux/dasd_mod.h

 S390 IOMMU (PCI)
+M:	Matthew Rosato <mjrosato@linux.ibm.com>
 M:	Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
Makefile | 6
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*
@@ -567,7 +567,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
 ifneq ($(CROSS_COMPILE),)
 CLANG_FLAGS	+= --target=$(notdir $(CROSS_COMPILE:%-=%))
 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
-CLANG_FLAGS	+= --prefix=$(GCC_TOOLCHAIN_DIR)
+CLANG_FLAGS	+= --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
 GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
@@ -1754,7 +1754,7 @@ PHONY += descend $(build-dirs)
 descend: $(build-dirs)
 $(build-dirs): prepare
 	$(Q)$(MAKE) $(build)=$@ \
-	single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \
+	single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \
 	need-builtin=1 need-modorder=1

 clean-dirs := $(addprefix _clean_, $(clean-dirs))
@@ -137,7 +137,7 @@ export TEXT_OFFSET

 core-y		+= arch/arm64/
 libs-y		:= arch/arm64/lib/ $(libs-y)
-core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

 # Default target when executing plain make
 boot		:= arch/arm64/boot
@@ -454,10 +454,7 @@
 	status = "okay";
 	phy-mode = "2500base-x";
 	phys = <&cp1_comphy5 2>;
-	fixed-link {
-		speed = <2500>;
-		full-duplex;
-	};
+	managed = "in-band-status";
 };

 &cp1_spi1 {
@@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
 COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)

 CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
-CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
+CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
 CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
 ifneq ($(COMPAT_GCC_TOOLCHAIN),)
 CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
@@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i)
 	_atomic_spin_unlock_irqrestore(v, flags);
 }

+#define atomic64_set_release(v, i)	atomic64_set((v), (i))
+
 static __inline__ s64
 atomic64_read(const atomic64_t *v)
 {
@@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
 extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
 				   unsigned int new_);
 extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);

 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
@@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 #endif
 	case 4: return __cmpxchg_u32((unsigned int *)ptr,
 				     (unsigned int)old, (unsigned int)new_);
+	case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
 	}
 	__cmpxchg_called_with_bad_pointer();
 	return old;
@@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
 	_atomic_spin_unlock_irqrestore(ptr, flags);
 	return (unsigned long)prev;
 }
+
+u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
+{
+	unsigned long flags;
+	u8 prev;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return prev;
+}
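Note (not part of the diff): for readers outside the kernel tree, a rough user-space analogue of the lock-emulated byte cmpxchg added above, with a pthread mutex standing in for _atomic_spin_lock_irqsave(). This is a sketch of the pattern, not the kernel implementation.

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Emulated compare-and-exchange on a byte: the read-compare-write
 * sequence is made atomic by serializing it under one lock. */
static uint8_t cmpxchg_u8_emulated(volatile uint8_t *ptr, uint8_t old, uint8_t new)
{
	uint8_t prev;

	pthread_mutex_lock(&lock);
	if ((prev = *ptr) == old)
		*ptr = new;
	pthread_mutex_unlock(&lock);
	return prev;	/* caller checks prev == old to see if the swap happened */
}

int main(void)
{
	volatile uint8_t v = 5;
	uint8_t prev = cmpxchg_u8_emulated(&v, 5, 9);
	return !(prev == 5 && v == 9);	/* 0 on success */
}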
@@ -95,19 +95,40 @@ void __init mem_init(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
+	phys_addr_t start;
 	unsigned long size;

-	if (initrd_start >= initrd_end) {
-		pr_info("initrd not found or empty");
-		goto disable;
-	}
-	if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
-		pr_err("initrd extends beyond end of memory");
-		goto disable;
-	}
+	/* Ignore the virtul address computed during device tree parsing */
+	initrd_start = initrd_end = 0;
+
+	if (!phys_initrd_size)
+		return;
+	/*
+	 * Round the memory region to page boundaries as per free_initrd_mem()
+	 * This allows us to detect whether the pages overlapping the initrd
+	 * are in use, but more importantly, reserves the entire set of pages
+	 * as we don't want these pages allocated for other purposes.
+	 */
+	start = round_down(phys_initrd_start, PAGE_SIZE);
+	size = phys_initrd_size + (phys_initrd_start - start);
+	size = round_up(size, PAGE_SIZE);
+
+	if (!memblock_is_region_memory(start, size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
+		       (u64)start, size);
+		goto disable;
+	}

-	size = initrd_end - initrd_start;
-	memblock_reserve(__pa_symbol(initrd_start), size);
+	if (memblock_is_region_reserved(start, size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
+		       (u64)start, size);
+		goto disable;
+	}
+
+	memblock_reserve(start, size);
+	/* Now convert initrd to virtual addresses */
+	initrd_start = (unsigned long)__va(phys_initrd_start);
+	initrd_end = initrd_start + phys_initrd_size;
 	initrd_below_start_ok = 1;

 	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@@ -126,33 +147,36 @@ void __init setup_bootmem(void)
 {
 	struct memblock_region *reg;
 	phys_addr_t mem_size = 0;
+	phys_addr_t total_mem = 0;
+	phys_addr_t mem_start, end = 0;
 	phys_addr_t vmlinux_end = __pa_symbol(&_end);
 	phys_addr_t vmlinux_start = __pa_symbol(&_start);

 	/* Find the memory region containing the kernel */
 	for_each_memblock(memory, reg) {
-		phys_addr_t end = reg->base + reg->size;
-
-		if (reg->base <= vmlinux_start && vmlinux_end <= end) {
-			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
-
-			/*
-			 * Remove memblock from the end of usable area to the
-			 * end of region
-			 */
-			if (reg->base + mem_size < end)
-				memblock_remove(reg->base + mem_size,
-						end - reg->base - mem_size);
-		}
+		end = reg->base + reg->size;
+		if (!total_mem)
+			mem_start = reg->base;
+		if (reg->base <= vmlinux_start && vmlinux_end <= end)
+			BUG_ON(reg->size == 0);
+		total_mem = total_mem + reg->size;
 	}
-	BUG_ON(mem_size == 0);
+
+	/*
+	 * Remove memblock from the end of usable area to the
+	 * end of region
+	 */
+	mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
+	if (mem_start + mem_size < end)
+		memblock_remove(mem_start + mem_size,
+				end - mem_start - mem_size);

 	/* Reserve from the start of the kernel to the end of the kernel */
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

-	set_max_mapnr(PFN_DOWN(mem_size));
 	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
 	max_low_pfn = max_pfn;
+	set_max_mapnr(max_low_pfn);

 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();
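Note (not part of the diff): the round_down/round_up step above widens the initrd region to whole pages before reserving it. A minimal user-space sketch of that arithmetic, assuming 4 KiB pages and example addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define round_down(x, a) ((x) & ~((a) - 1))		/* a must be a power of two */
#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long phys_initrd_start = 0x80201234UL;	/* example, unaligned */
	unsigned long phys_initrd_size  = 10000UL;

	unsigned long start = round_down(phys_initrd_start, PAGE_SIZE);
	unsigned long size  = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	/* prints a page-aligned superset of the original region */
	printf("reserve [0x%lx, +0x%lx)\n", start, size);
	return 0;
}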
@@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
 			   (__pa(((uintptr_t) kasan_early_shadow_pmd))),
 			   __pgprot(_PAGE_TABLE)));

-	flush_tlb_all();
+	local_flush_tlb_all();
 }

 static void __init populate(void *start, void *end)
@@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
 			   pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
 				   __pgprot(_PAGE_TABLE)));

-	flush_tlb_all();
+	local_flush_tlb_all();
 	memset(start, 0, end - start);
 }
@@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
 CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
 CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
 CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
-CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);

@@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
 	CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
 	CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
 	CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
-	CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR),
+	CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH),
 	CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
 	CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
 	NULL,
@@ -39,6 +39,7 @@
 #define BT_MBI_UNIT_PMC		0x04
 #define BT_MBI_UNIT_GFX		0x06
 #define BT_MBI_UNIT_SMI		0x0C
+#define BT_MBI_UNIT_CCK		0x14
 #define BT_MBI_UNIT_USB		0x43
 #define BT_MBI_UNIT_SATA	0xA3
 #define BT_MBI_UNIT_PCIE	0xA6
@@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable,
 	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }

+static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
+		     unsigned int nbytes)
+{
+	if (!user_mode(regs))
+		return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
+
+	/*
+	 * Make sure userspace isn't trying to trick us into dumping kernel
+	 * memory by pointing the userspace instruction pointer at it.
+	 */
+	if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
+		return -EINVAL;
+
+	return copy_from_user_nmi(buf, (void __user *)src, nbytes);
+}
+
 /*
  * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
  *
@@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
 	u8 opcodes[OPCODE_BUFSIZE];
 	unsigned long prologue = regs->ip - PROLOGUE_SIZE;
-	bool bad_ip;
-
-	/*
-	 * Make sure userspace isn't trying to trick us into dumping kernel
-	 * memory by pointing the userspace instruction pointer at it.
-	 */
-	bad_ip = user_mode(regs) &&
-		__chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);

-	if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue,
-					OPCODE_BUFSIZE)) {
+	if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
 		printk("%sCode: Bad RIP value.\n", loglvl);
 	} else {
 		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
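Note (not part of the diff): the guard copy_code() adds is a classic overflow-safe range check — reject any user-supplied source range that does not sit entirely below the user/kernel split before copying. A portable sketch of the idea; the TASK_SIZE_MAX value below is an illustrative x86-64 figure, and __builtin_add_overflow is a GCC/Clang builtin:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TASK_SIZE_MAX 0x00007ffffffff000ULL	/* illustrative x86-64 value */

static bool range_not_ok(uint64_t addr, uint64_t size, uint64_t limit)
{
	uint64_t end;

	/* overflow-safe: addr + size must not wrap and must stay below limit */
	return __builtin_add_overflow(addr, size, &end) || end > limit;
}

int main(void)
{
	assert(!range_not_ok(0x1000, 64, TASK_SIZE_MAX));	/* user range: ok */
	assert(range_not_ok(0xffffffff81000000ULL, 64, TASK_SIZE_MAX)); /* kernel address: rejected */
	return 0;
}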
@@ -1074,7 +1074,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
 		copy_part(offsetof(struct fxregs_state, st_space), 128,
 			  &xsave->i387.st_space, &kbuf, &offset_start, &count);
 	if (header.xfeatures & XFEATURE_MASK_SSE)
-		copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
+		copy_part(xstate_offsets[XFEATURE_SSE], 256,
 			  &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
 	/*
 	 * Fill xsave->i387.sw_reserved value for ptrace frame:
@@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 		 * or a page fault), which can make frame pointers
 		 * unreliable.
 		 */
-
 		if (IS_ENABLED(CONFIG_FRAME_POINTER))
 			return -EINVAL;
 	}
@@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 	if (unwind_error(&state))
 		return -EINVAL;

-	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
-	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
-		return -EINVAL;
-
 	return 0;
 }
|
@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
|
||||
/*
|
||||
* Find the orc_entry associated with the text address.
|
||||
*
|
||||
* Decrement call return addresses by one so they work for sibling
|
||||
* calls and calls to noreturn functions.
|
||||
* For a call frame (as opposed to a signal frame), state->ip points to
|
||||
* the instruction after the call. That instruction's stack layout
|
||||
* could be different from the call instruction's layout, for example
|
||||
* if the call was to a noreturn function. So get the ORC data for the
|
||||
* call instruction itself.
|
||||
*/
|
||||
orc = orc_find(state->signal ? state->ip : state->ip - 1);
|
||||
if (!orc) {
|
||||
@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
|
||||
state->sp = task->thread.sp;
|
||||
state->bp = READ_ONCE_NOCHECK(frame->bp);
|
||||
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
|
||||
state->signal = (void *)state->ip == ret_from_fork;
|
||||
}
|
||||
|
||||
if (get_stack_info((unsigned long *)state->sp, state->task,
|
||||
|
@@ -358,6 +358,7 @@ SECTIONS
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
 		__bss_start = .;
 		*(.bss..page_aligned)
+		. = ALIGN(PAGE_SIZE);
 		*(BSS_MAIN)
 		BSS_DECRYPTED
 		. = ALIGN(PAGE_SIZE);
@@ -57,7 +57,7 @@ static inline
 __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 				   int len, __wsum sum, int *err_ptr)
 {
-	if (access_ok(dst, len))
+	if (access_ok(src, len))
 		return csum_partial_copy_generic((__force const void *)src, dst,
 					len, sum, err_ptr, NULL);
 	if (len)
@@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		trace_binder_unmap_user_end(alloc, index);
 	}
 	mmap_read_unlock(mm);
-	mmput(mm);
+	mmput_async(mm);

 	trace_binder_unmap_kernel_start(alloc, index);
@@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
 		return next;

 	/* When no more children in primary, continue with secondary */
-	if (!IS_ERR_OR_NULL(fwnode->secondary))
+	if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
 		next = fwnode_get_next_child_node(fwnode->secondary, child);

 	return next;
@@ -2865,6 +2865,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
 	return error;
 }

+/*
+ * Ignore timers tagged with no-reset and no-idle. These are likely in use,
+ * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
+ * are needed, we could also look at the timer register configuration.
+ */
+static int sysc_check_active_timer(struct sysc *ddata)
+{
+	if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
+	    ddata->cap->type != TI_SYSC_OMAP4_TIMER)
+		return 0;
+
+	if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
+	    (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+		return -EBUSY;
+
+	return 0;
+}
+
 static const struct of_device_id sysc_match_table[] = {
 	{ .compatible = "simple-bus", },
 	{ /* sentinel */ },
@@ -2921,6 +2939,10 @@ static int sysc_probe(struct platform_device *pdev)
 	if (error)
 		return error;

+	error = sysc_check_active_timer(ddata);
+	if (error)
+		return error;
+
 	error = sysc_get_clocks(ddata);
 	if (error)
 		return error;
@@ -814,7 +814,8 @@ static struct inode *devmem_inode;
 #ifdef CONFIG_IO_STRICT_DEVMEM
 void revoke_devmem(struct resource *res)
 {
-	struct inode *inode = READ_ONCE(devmem_inode);
+	/* pairs with smp_store_release() in devmem_init_inode() */
+	struct inode *inode = smp_load_acquire(&devmem_inode);

 	/*
 	 * Check that the initialization has completed. Losing the race
@@ -1028,8 +1029,11 @@ static int devmem_init_inode(void)
 		return rc;
 	}

-	/* publish /dev/mem initialized */
-	WRITE_ONCE(devmem_inode, inode);
+	/*
+	 * Publish /dev/mem initialized.
+	 * Pairs with smp_load_acquire() in revoke_devmem().
+	 */
+	smp_store_release(&devmem_inode, inode);

 	return 0;
 }
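Note (not part of the diff): the fix above upgrades a plain WRITE_ONCE/READ_ONCE pair to release/acquire so the consumer that sees the pointer also sees the fully initialized object behind it. A self-contained sketch of the same publish/consume pattern in portable C11 atomics (the kernel uses smp_store_release()/smp_load_acquire(); names here are illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct inode { int ready; };	/* stand-in for the real structure */
static _Atomic(struct inode *) devmem_inode;

static void publisher(struct inode *inode)
{
	/* All initialization of *inode must happen before this store. */
	atomic_store_explicit(&devmem_inode, inode, memory_order_release);
}

static struct inode *consumer(void)
{
	/* Pairs with the release store: if we observe the pointer, we also
	 * observe the fully initialized inode it points to. */
	return atomic_load_explicit(&devmem_inode, memory_order_acquire);
}

int main(void)
{
	static struct inode ino = { .ready = 1 };

	publisher(&ino);
	return consumer()->ready ? 0 : 1;
}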
@@ -19,7 +19,7 @@
 /* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
 #define DMTIMER_TYPE1_ENABLE	((1 << 9) | (SYSC_IDLE_SMART << 3) | \
 				 SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
-
+#define DMTIMER_TYPE1_DISABLE	(SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
 #define DMTIMER_TYPE2_ENABLE	(SYSC_IDLE_SMART_WKUP << 2)
 #define DMTIMER_RESET_WAIT	100000
@@ -44,6 +44,8 @@ struct dmtimer_systimer {
 	u8 ctrl;
 	u8 wakeup;
 	u8 ifctrl;
+	struct clk *fck;
+	struct clk *ick;
 	unsigned long rate;
 };

@@ -298,16 +300,20 @@ static void __init dmtimer_systimer_select_best(void)
 }

 /* Interface clocks are only available on some SoCs variants */
-static int __init dmtimer_systimer_init_clock(struct device_node *np,
+static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
+					      struct device_node *np,
 					      const char *name,
 					      unsigned long *rate)
 {
 	struct clk *clock;
 	unsigned long r;
+	bool is_ick = false;
 	int error;

+	is_ick = !strncmp(name, "ick", 3);
+
 	clock = of_clk_get_by_name(np, name);
-	if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3))
+	if ((PTR_ERR(clock) == -EINVAL) && is_ick)
 		return 0;
 	else if (IS_ERR(clock))
 		return PTR_ERR(clock);
@@ -320,6 +326,11 @@ static int __init dmtimer_systimer_init_clock(struct device_node *np,
 	if (!r)
 		return -ENODEV;

+	if (is_ick)
+		t->ick = clock;
+	else
+		t->fck = clock;
+
 	*rate = r;

 	return 0;
@@ -339,7 +350,10 @@ static void dmtimer_systimer_enable(struct dmtimer_systimer *t)

 static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
 {
-	writel_relaxed(0, t->base + t->sysc);
+	if (!dmtimer_systimer_revision1(t))
+		return;
+
+	writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
 }

 static int __init dmtimer_systimer_setup(struct device_node *np,
@@ -366,13 +380,13 @@ static int __init dmtimer_systimer_setup(struct device_node *np,
 		pr_err("%s: clock source init failed: %i\n", __func__, error);

 	/* For ti-sysc, we have timer clocks at the parent module level */
-	error = dmtimer_systimer_init_clock(np->parent, "fck", &rate);
+	error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
 	if (error)
 		goto err_unmap;

 	t->rate = rate;

-	error = dmtimer_systimer_init_clock(np->parent, "ick", &rate);
+	error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
 	if (error)
 		goto err_unmap;

@@ -496,12 +510,18 @@ static void omap_clockevent_idle(struct clock_event_device *evt)
 	struct dmtimer_systimer *t = &clkevt->t;

 	dmtimer_systimer_disable(t);
+	clk_disable(t->fck);
 }

 static void omap_clockevent_unidle(struct clock_event_device *evt)
 {
 	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
 	struct dmtimer_systimer *t = &clkevt->t;
+	int error;
+
+	error = clk_enable(t->fck);
+	if (error)
+		pr_err("could not enable timer fck on resume: %i\n", error);

 	dmtimer_systimer_enable(t);
 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
@@ -570,8 +590,8 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
 			3, /* Timer internal resynch latency */
 			0xffffffff);

-	if (of_device_is_compatible(np, "ti,am33xx") ||
-	    of_device_is_compatible(np, "ti,am43")) {
+	if (of_machine_is_compatible("ti,am33xx") ||
+	    of_machine_is_compatible("ti,am43")) {
 		dev->suspend = omap_clockevent_idle;
 		dev->resume = omap_clockevent_unidle;
 	}
@@ -616,12 +636,18 @@ static void dmtimer_clocksource_suspend(struct clocksource *cs)

 	clksrc->loadval = readl_relaxed(t->base + t->counter);
 	dmtimer_systimer_disable(t);
+	clk_disable(t->fck);
 }

 static void dmtimer_clocksource_resume(struct clocksource *cs)
 {
 	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
 	struct dmtimer_systimer *t = &clksrc->t;
+	int error;
+
+	error = clk_enable(t->fck);
+	if (error)
+		pr_err("could not enable timer fck on resume: %i\n", error);

 	dmtimer_systimer_enable(t);
 	writel_relaxed(clksrc->loadval, t->base + t->counter);
@@ -653,8 +679,8 @@ static int __init dmtimer_clocksource_init(struct device_node *np)
 	dev->mask = CLOCKSOURCE_MASK(32);
 	dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;

-	if (of_device_is_compatible(np, "ti,am33xx") ||
-	    of_device_is_compatible(np, "ti,am43")) {
+	/* Unlike for clockevent, legacy code sets suspend only for am4 */
+	if (of_machine_is_compatible("ti,am43")) {
 		dev->suspend = dmtimer_clocksource_suspend;
 		dev->resume = dmtimer_clocksource_resume;
 	}
@@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
 	case PF_INET:
 		if (likely(!inet_sk(sk)->inet_rcv_saddr))
 			return ndev;
-		ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
+		ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case PF_INET6:
@@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 						 &record_type);
 			if (err)
 				goto out_err;
+
+			/* Avoid appending tls handshake, alert to tls data */
+			if (skb)
+				tx_skb_finalize(skb);
 		}

 		recordsz = size;
 		csk->tlshws.txleft = recordsz;
 		csk->tlshws.type = record_type;
-
-		if (skb)
-			ULP_SKB_CB(skb)->ulp.tls.type = record_type;
 	}

 	if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
@@ -356,10 +356,7 @@ static struct pstore_info efi_pstore_info = {

 static __init int efivars_pstore_init(void)
 {
-	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
-		return 0;
-
-	if (!efivars_kobject())
+	if (!efivars_kobject() || !efivar_supports_writes())
 		return 0;

 	if (efivars_pstore_disable)
@@ -176,11 +176,13 @@ static struct efivar_operations generic_ops;
 static int generic_ops_register(void)
 {
 	generic_ops.get_variable = efi.get_variable;
-	generic_ops.set_variable = efi.set_variable;
-	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
 	generic_ops.get_next_variable = efi.get_next_variable;
 	generic_ops.query_variable_store = efi_query_variable_store;

+	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
+		generic_ops.set_variable = efi.set_variable;
+		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+	}
 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
 }

@@ -382,7 +384,8 @@ static int __init efisubsys_init(void)
 		return -ENOMEM;
 	}

-	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
+	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
 		efivar_ssdt_load();
 		error = generic_ops_register();
 		if (error)
@@ -416,7 +419,8 @@ static int __init efisubsys_init(void)
 err_remove_group:
 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
 err_unregister:
-	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
+	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
 		generic_ops_unregister();
 err_put:
 	kobject_put(efi_kobj);
@@ -680,11 +680,8 @@ int efivars_sysfs_init(void)
 	struct kobject *parent_kobj = efivars_kobject();
 	int error = 0;

-	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
-		return -ENODEV;
-
 	/* No efivars has been registered yet */
-	if (!parent_kobj)
+	if (!parent_kobj || !efivar_supports_writes())
 		return 0;

 	printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
@@ -6,8 +6,7 @@
 # enabled, even if doing so doesn't break the build.
 #
 cflags-$(CONFIG_X86_32)		:= -march=i386
-cflags-$(CONFIG_X86_64)		:= -mcmodel=small \
-				   $(call cc-option,-maccumulate-outgoing-args)
+cflags-$(CONFIG_X86_64)		:= -mcmodel=small
 cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
 				   -fPIC -fno-strict-aliasing -mno-red-zone \
 				   -mno-mmx -mno-sse -fshort-wchar \
@@ -44,7 +44,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
 	*addr = ALIGN((unsigned long)alloc_addr, align);

 	if (slack > 0) {
-		int l = (alloc_addr % align) / EFI_PAGE_SIZE;
+		int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE;

 		if (l) {
 			efi_bs_call(free_pages, alloc_addr, slack - l + 1);
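Note (not part of the diff): the change above swaps a modulo for a mask. For a power-of-two 'align', (x % align) equals (x & (align - 1)), and the masking form avoids emitting a division — relevant in freestanding EFI stub code. A one-line sanity check:

#include <assert.h>

int main(void)
{
	unsigned long align = 4096;	/* must be a power of two */
	unsigned long x = 0x12345;

	assert((x % align) == (x & (align - 1)));
	return 0;
}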
@@ -121,23 +121,6 @@ static unsigned long get_dram_base(void)
 	return membase;
 }

-/*
- * This function handles the architcture specific differences between arm and
- * arm64 regarding where the kernel image must be loaded and any memory that
- * must be reserved. On failure it is required to free all
- * all allocations it has made.
- */
-efi_status_t handle_kernel_image(unsigned long *image_addr,
-				 unsigned long *image_size,
-				 unsigned long *reserve_addr,
-				 unsigned long *reserve_size,
-				 unsigned long dram_base,
-				 efi_loaded_image_t *image);
-
-asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
-					    unsigned long fdt_addr,
-					    unsigned long fdt_size);
-
 /*
  * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
  * that is described in the PE/COFF header. Most of the code is the same
@@ -776,6 +776,22 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
 			     unsigned long *load_size,
 			     unsigned long soft_limit,
 			     unsigned long hard_limit);
+/*
+ * This function handles the architcture specific differences between arm and
+ * arm64 regarding where the kernel image must be loaded and any memory that
+ * must be reserved. On failure it is required to free all
+ * all allocations it has made.
+ */
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+				 unsigned long *image_size,
+				 unsigned long *reserve_addr,
+				 unsigned long *reserve_size,
+				 unsigned long dram_base,
+				 efi_loaded_image_t *image);
+
+asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
+					    unsigned long fdt_addr,
+					    unsigned long fdt_size);

 void efi_handle_post_ebs_state(void);
@@ -8,6 +8,7 @@

 #include <linux/efi.h>
 #include <linux/pci.h>
+#include <linux/stddef.h>

 #include <asm/efi.h>
 #include <asm/e820/types.h>
@@ -361,8 +362,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
 	int options_size = 0;
 	efi_status_t status;
 	char *cmdline_ptr;
-	unsigned long ramdisk_addr;
-	unsigned long ramdisk_size;

 	efi_system_table = sys_table_arg;

@@ -390,8 +389,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,

 	hdr = &boot_params->hdr;

-	/* Copy the second sector to boot_params */
-	memcpy(&hdr->jump, image_base + 512, 512);
+	/* Copy the setup header from the second sector to boot_params */
+	memcpy(&hdr->jump, image_base + 512,
+	       sizeof(struct setup_header) - offsetof(struct setup_header, jump));

 	/*
 	 * Fill out some of the header fields ourselves because the
@@ -1229,3 +1229,9 @@ out:
 	return rv;
 }
 EXPORT_SYMBOL_GPL(efivars_unregister);
+
+int efivar_supports_writes(void)
+{
+	return __efivars && __efivars->ops->set_variable;
+}
+EXPORT_SYMBOL_GPL(efivar_supports_writes);
@@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev)
 	 * on this port and minimum soft reset pulse width has elapsed.
 	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
 	 */
-	if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
+	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+			       v & PORT_CTRL_SFTRST_ACK,
 			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
 		dev_err(&pdev->dev, "timeout, fail to reset device\n");
 		return -ETIMEDOUT;
@@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
 {
 	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
 	struct dfl_fpga_cdev *cdev = drvdata->cdev;
-	int ret = 0;

 	if (!num_vfs) {
 		/*
@@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
 		dfl_fpga_cdev_config_ports_pf(cdev);

 	} else {
+		int ret;
+
 		/*
 		 * before enable SRIOV, put released ports into VF access mode
 		 * first of all.
@@ -778,8 +778,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		tmp_str++;
 	while (isspace(*++tmp_str));

-	while (tmp_str[0]) {
-		sub_str = strsep(&tmp_str, delimiter);
+	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
 		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
 		if (ret)
 			return -EINVAL;
@@ -1039,8 +1038,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 	memcpy(buf_cpy, buf, bytes);
 	buf_cpy[bytes] = '\0';
 	tmp = buf_cpy;
-	while (tmp[0]) {
-		sub_str = strsep(&tmp, delimiter);
+	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
 		if (strlen(sub_str)) {
 			ret = kstrtol(sub_str, 0, &level);
 			if (ret)
@@ -1637,8 +1635,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 		i++;
 	memcpy(buf_cpy, buf, count-i);
 	tmp_str = buf_cpy;
-	while (tmp_str[0]) {
-		sub_str = strsep(&tmp_str, delimiter);
+	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
 		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
 		if (ret)
 			return -EINVAL;
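Note (not part of the diff): the three hunks above move the strsep() call into the loop condition. strsep() returns NULL once the string is exhausted and can return empty tokens across runs of delimiters, so testing tmp[0] and calling strsep() inside the body can hand a NULL or empty token to the parser. A user-space illustration of the safer idiom (strsep is a BSD/glibc extension, hence the feature macro):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "300 600  900";	/* note the double space */
	char *tmp = buf, *sub_str;

	/* loop condition owns the NULL check; body only sees valid tokens */
	while ((sub_str = strsep(&tmp, " ")) != NULL) {
		if (strlen(sub_str))	/* skip empty tokens between delimiters */
			printf("token: %s\n", sub_str);
	}
	return 0;
}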
@@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,

 	/* sclk is bigger than max sclk in the dependence table */
 	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-	vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-			(dep_table->entries[i - 1].vddc -
-			 (uint16_t)VDDC_VDDCI_DELTA));

 	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
 		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
 	else if (dep_table->entries[i - 1].vddci)
 		*voltage |= (dep_table->entries[i - 1].vddci *
 			     VOLTAGE_SCALE) << VDDC_SHIFT;
-	else
+	else {
+		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i - 1].vddc -
+				 (uint16_t)VDDC_VDDCI_DELTA));
+
 		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+	}

 	if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
 		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
@@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip)

 int lima_pp_bcast_resume(struct lima_ip *ip)
 {
+	/* PP has been reset by individual PP resume */
+	ip->data.async_reset = false;
 	return 0;
 }

@@ -260,7 +260,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
 	unsigned long reg;

 	reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
-	if (reg & SUN4I_HDMI_HPD_HIGH) {
+	if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
 		cec_phys_addr_invalidate(hdmi->cec_adap);
 		return connector_status_disconnected;
 	}
@@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
 		/* Read data if receive data valid is set */
 		while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
 		       CDNS_I2C_SR_RXDV) {
-			/*
-			 * Clear hold bit that was set for FIFO control if
-			 * RX data left is less than FIFO depth, unless
-			 * repeated start is selected.
-			 */
-			if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
-			    !id->bus_hold_flag)
-				cdns_i2c_clear_bus_hold(id);
-
 			if (id->recv_count > 0) {
 				*(id->p_recv_buf)++ =
					cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
 				id->recv_count--;
 				id->curr_recv_count--;
+
+				/*
+				 * Clear hold bit that was set for FIFO control
+				 * if RX data left is less than or equal to
+				 * FIFO DEPTH unless repeated start is selected
+				 */
+				if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
+				    !id->bus_hold_flag)
+					cdns_i2c_clear_bus_hold(id);
+
 			} else {
 				dev_err(id->adap.dev.parent,
					"xfer_size reg rollover. xfer aborted!\n");
@@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
 	 * Check for the message size against FIFO depth and set the
 	 * 'hold bus' bit if it is greater than FIFO depth.
 	 */
-	if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+	if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
 		ctrl_reg |= CDNS_I2C_CR_HOLD;
-	else
-		ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;

 	cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);

@@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
 	 * Check for the message size against FIFO depth and set the
 	 * 'hold bus' bit if it is greater than FIFO depth.
 	 */
-	if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+	if (id->send_count > CDNS_I2C_FIFO_DEPTH)
 		ctrl_reg |= CDNS_I2C_CR_HOLD;
-	else
-		ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
-
 	cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);

 	/* Clear the interrupts in interrupt status register. */
@@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 	geni_se_select_mode(se, GENI_SE_FIFO);

 	writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
-	geni_se_setup_m_cmd(se, I2C_READ, m_param);

 	if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
 		geni_se_select_mode(se, GENI_SE_FIFO);
@@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 		dma_buf = NULL;
 	}

+	geni_se_setup_m_cmd(se, I2C_READ, m_param);
+
 	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
 	if (!time_left)
 		geni_i2c_abort_xfer(gi2c);
@@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 	geni_se_select_mode(se, GENI_SE_FIFO);

 	writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
-	geni_se_setup_m_cmd(se, I2C_WRITE, m_param);

 	if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
 		geni_se_select_mode(se, GENI_SE_FIFO);
@@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 		dma_buf = NULL;
 	}

+	geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
+
 	if (!dma_buf) /* Get FIFO IRQ */
 		writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);

@@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
 	/* disable irqs and ensure none is running before clearing ptr */
 	rcar_i2c_write(priv, ICSIER, 0);
 	rcar_i2c_write(priv, ICSCR, 0);
+	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */

 	synchronize_irq(priv->irq);
 	priv->slave = NULL;
@@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto out_pm_put;

+	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+
 	if (priv->devtype == I2C_RCAR_GEN3) {
 		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 		if (!IS_ERR(priv->rstc)) {
@@ -3676,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
 		return ret;
 	}
 	cm_id_priv->id.state = IB_CM_IDLE;
+	spin_lock_irq(&cm.lock);
 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 	}
+	spin_unlock_irq(&cm.lock);
 	return 0;
 }

@@ -649,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 {
 	struct ib_uverbs_file *ufile = attrs->ufile;

-	/* alloc_commit consumes the uobj kref */
-	uobj->uapi_object->type_class->alloc_commit(uobj);
-
 	/* kref is held so long as the uobj is on the uobj list. */
 	uverbs_uobject_get(uobj);
 	spin_lock_irq(&ufile->uobjects_lock);
@@ -661,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 	/* matches atomic_set(-1) in alloc_uobj */
 	atomic_set(&uobj->usecnt, 0);

+	/* alloc_commit consumes the uobj kref */
+	uobj->uapi_object->type_class->alloc_commit(uobj);
+
 	/* Matches the down_read in rdma_alloc_begin_uobject */
 	up_read(&ufile->hw_destroy_rwsem);
 }
@@ -3954,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
 	return 0;
 }

+static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
+				  const struct ib_qp_attr *attr)
+{
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+		return IB_MTU_4096;
+
+	return attr->path_mtu;
+}
+
 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 				 const struct ib_qp_attr *attr, int attr_mask,
 				 struct hns_roce_v2_qp_context *context,
@@ -3965,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	dma_addr_t trrl_ba;
 	dma_addr_t irrl_ba;
+	enum ib_mtu mtu;
 	u8 port_num;
 	u64 *mtts;
 	u8 *dmac;
@@ -4062,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
 		       V2_QPC_BYTE_52_DMAC_S, 0);

-	/* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
+	mtu = get_mtu(ibqp, attr);
+
+	if (attr_mask & IB_QP_PATH_MTU) {
+		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+			       V2_QPC_BYTE_24_MTU_S, mtu);
+		roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+			       V2_QPC_BYTE_24_MTU_S, 0);
+	}
+
+#define MAX_LP_MSG_LEN 65536
+	/* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
 	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
 		       V2_QPC_BYTE_56_LP_PKTN_INI_S,
-		       ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
+		       ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
 	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);

-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
-		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
-			       V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
-	else if (attr_mask & IB_QP_PATH_MTU)
-		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
-			       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
-
-	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
-		       V2_QPC_BYTE_24_MTU_S, 0);
-
 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
 		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
 	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
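Note (not part of the diff): LP_PKTN_INI is the log2 of how many MTU-sized packets fit in one 64 KB low-latency message, which is what the ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)) expression above computes. A small user-space sketch of the arithmetic with an example MTU:

#include <stdio.h>

#define MAX_LP_MSG_LEN 65536

static unsigned int ilog2(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int mtu = 4096;	/* ib_mtu_enum_to_int(IB_MTU_4096) */

	printf("lp_pktn_ini = %u\n", ilog2(MAX_LP_MSG_LEN / mtu));	/* prints 4 */
	return 0;
}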
@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,

 	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
 	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
-			      hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
 	buf_attr.region[0].size = length;
 	buf_attr.region[0].hopnum = mr->pbl_hop_num;
 	buf_attr.region_count = 1;
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	 */
 	synchronize_srcu(&dev->odp_srcu);

+	/*
+	 * All work on the prefetch list must be completed, xa_erase() prevented
+	 * new work from being created.
+	 */
+	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+	/*
+	 * At this point it is forbidden for any other thread to enter
+	 * pagefault_mr() on this imr. It is already forbidden to call
+	 * pagefault_mr() on an implicit child. Due to this additions to
+	 * implicit_children are prevented.
+	 */
+
+	/*
+	 * Block destroy_unused_implicit_child_mr() from incrementing
+	 * num_deferred_work.
+	 */
 	xa_lock(&imr->implicit_children);
 	xa_for_each (&imr->implicit_children, idx, mtt) {
 		__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	xa_unlock(&imr->implicit_children);

 	/*
-	 * num_deferred_work can only be incremented inside the odp_srcu, or
-	 * under xa_lock while the child is in the xarray. Thus at this point
-	 * it is only decreasing, and all work holding it is now on the wq.
+	 * Wait for any concurrent destroy_unused_implicit_child_mr() to
+	 * complete.
 	 */
 	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 	struct mlx5_srq_table *table = &dev->srq_table;
 	struct mlx5_core_srq *srq;

-	xa_lock(&table->array);
+	xa_lock_irq(&table->array);
 	srq = xa_load(&table->array, srqn);
 	if (srq)
 		refcount_inc(&srq->common.refcount);
-	xa_unlock(&table->array);
+	xa_unlock_irq(&table->array);

 	return srq;
 }
@@ -243,6 +243,7 @@ static int aggregate_requests(struct icc_node *node)
 {
 	struct icc_provider *p = node->provider;
 	struct icc_req *r;
+	u32 avg_bw, peak_bw;

 	node->avg_bw = 0;
 	node->peak_bw = 0;
@@ -251,9 +252,14 @@ static int aggregate_requests(struct icc_node *node)
 		p->pre_aggregate(node);

 	hlist_for_each_entry(r, &node->req_list, req_node) {
-		if (!r->enabled)
-			continue;
-		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
+		if (r->enabled) {
+			avg_bw = r->avg_bw;
+			peak_bw = r->peak_bw;
+		} else {
+			avg_bw = 0;
+			peak_bw = 0;
+		}
+		p->aggregate(node, r->tag, avg_bw, peak_bw,
 			     &node->avg_bw, &node->peak_bw);
 	}

@@ -197,13 +197,13 @@ DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS,
 DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
 DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
 DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
-DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
-DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
-DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
-DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
-DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
-DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
-DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
+DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
+DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
+DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
+DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
+DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
+DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
 DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
 DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
 DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
@ -65,6 +65,7 @@ struct qcom_iommu_domain {
struct mutex init_mutex; /* Protects iommu pointer */
struct iommu_domain domain;
struct qcom_iommu_dev *iommu;
struct iommu_fwspec *fwspec;
};
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
return dev_iommu_priv_get(dev);
}
static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
struct qcom_iommu_dev *qcom_iommu = d->iommu;
if (!qcom_iommu)
return NULL;
return qcom_iommu->ctxs[asid - 1];
@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
static void qcom_iommu_tlb_sync(void *cookie)
{
struct iommu_fwspec *fwspec;
struct device *dev = cookie;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
unsigned int val, ret;
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
static void qcom_iommu_tlb_inv_context(void *cookie)
{
struct device *dev = cookie;
struct iommu_fwspec *fwspec;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
}
@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
struct device *dev = cookie;
struct iommu_fwspec *fwspec;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i, reg;
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
size_t s = size;
iova = (iova >> 12) << 12;
@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
};
qcom_domain->iommu = qcom_iommu;
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
qcom_domain->fwspec = fwspec;
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
if (!pgtbl_ops) {
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
ret = -ENOMEM;
@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
domain->geometry.force_aperture = true;
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
if (!ctx->secure_init) {
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
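The qcom_iommu hunks above switch the io-pgtable TLB callbacks from a struct device cookie to the domain itself, so the fwspec is reachable directly instead of being re-derived from the device on every invalidation. A standalone sketch of the cookie pattern, with simplified stand-in structures:

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct fwspec { int num_ids; int ids[4]; };
struct domain { struct fwspec *fwspec; };

/* The callback now receives the domain as its cookie, so it can walk
 * the fwspec directly instead of calling back into the device layer. */
static void tlb_sync(void *cookie)
{
	struct domain *d = cookie;

	for (int i = 0; i < d->fwspec->num_ids; i++)
		printf("sync ctx for asid %d\n", d->fwspec->ids[i]);
}

int main(void)
{
	struct fwspec fw = { 2, { 1, 2 } };
	struct domain dom = { &fw };

	tlb_sync(&dom);	/* cookie registered at pagetable-alloc time */
	return 0;
}
```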
@ -2420,7 +2420,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@ -2481,7 +2481,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
if (unlikely(dm_suspended(ic->ti)))
if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
@ -2408,6 +2409,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@ -2766,7 +2768,9 @@ retry:
if (r)
goto out_unlock;
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@ -2863,7 +2867,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@ -3024,6 +3030,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
static int dm_post_suspending_md(struct mapped_device *md)
{
return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@ -3040,6 +3051,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti)
{
return dm_post_suspending_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));
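The device-mapper hunks above add a DMF_POST_SUSPENDING flag that is held only while dm_table_postsuspend_targets() runs, so targets such as dm-integrity can test dm_post_suspending() from their workers. A standalone sketch of the flag-window pattern (names below are stand-ins for the kernel's bit operations):

```c
#include <stdio.h>

/* Stand-in flag; the kernel uses set_bit/clear_bit/test_bit on md->flags. */
static int post_suspending;

static int dm_post_suspending(void) { return post_suspending; }

static void postsuspend_targets(void)
{
	printf("postsuspend targets\n");
}

static void integrity_writer(void)
{
	/* A deferred worker bails out while the postsuspend hooks run. */
	if (dm_post_suspending()) {
		printf("writer: device post-suspending, skip\n");
		return;
	}
	printf("writer: flush metadata\n");
}

int main(void)
{
	integrity_writer();	/* runs normally */

	post_suspending = 1;	/* set_bit(DMF_POST_SUSPENDING, ...) */
	integrity_writer();	/* sees the window and returns early */
	postsuspend_targets();
	post_suspending = 0;	/* clear_bit(DMF_POST_SUSPENDING, ...) */
	return 0;
}
```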
@ -499,11 +499,19 @@ static int validate_queue_index(struct hl_device *hdev,
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
/* This must be checked here to prevent out-of-bounds access to
* hw_queues_props array
*/
if (chunk->queue_index >= HL_MAX_QUEUES) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
}
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if ((chunk->queue_index >= HL_MAX_QUEUES) ||
(hw_queue_prop->type == QUEUE_TYPE_NA)) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
dev_err(hdev->dev, "Queue index %d is not applicable\n",
chunk->queue_index);
return -EINVAL;
}
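The fix above is purely one of ordering: the old code formed &hw_queues_props[queue_index] before checking the bound, so the bounds test came too late. A minimal standalone sketch of the corrected ordering:

```c
#include <stdio.h>

#define MAX_QUEUES 4

struct queue_props { int type; };

static struct queue_props props[MAX_QUEUES] = { { 1 }, { 1 }, { 0 }, { 1 } };

static int validate_queue_index(unsigned int index)
{
	/* Check the bound first; only then is props[index] safe to form. */
	if (index >= MAX_QUEUES)
		return -1;

	if (props[index].type == 0)	/* QUEUE_TYPE_NA analogue */
		return -1;

	return 0;
}

int main(void)
{
	printf("%d %d %d\n", validate_queue_index(1),
	       validate_queue_index(2), validate_queue_index(9));
	return 0;
}
```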
@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_reg = i2c_reg;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, (long *) val);
0, (long *) val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
if (*ppos)
return 0;
sprintf(tmp_buf, "%d\n", hdev->clock_gating);
sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
strlen(tmp_buf) + 1);
@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
u64 value;
ssize_t rc;
if (atomic_read(&hdev->in_reset)) {
@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
return 0;
}
rc = kstrtouint_from_user(buf, count, 10, &value);
rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
if (value) {
hdev->clock_gating = 1;
if (hdev->asic_funcs->enable_clock_gating)
hdev->asic_funcs->enable_clock_gating(hdev);
} else {
if (hdev->asic_funcs->disable_clock_gating)
hdev->asic_funcs->disable_clock_gating(hdev);
hdev->clock_gating = 0;
}
hdev->clock_gating_mask = value;
hdev->asic_funcs->set_clock_gating(hdev);
return count;
}
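The clk_gate debugfs file above now accepts a 64-bit hex engine mask instead of a decimal 0/1 flag. A userspace-flavored sketch of the same parse step, with strtoull standing in for the kernel's kstrtoull_from_user:

```c
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a hex engine mask the way the new debugfs write does; in the
 * driver the parsed mask is then applied via set_clock_gating(). */
static int apply_clock_gating_mask(const char *buf, uint64_t *mask)
{
	char *end;

	errno = 0;
	*mask = strtoull(buf, &end, 16);	/* base 16, 0x prefix ok */
	if (errno || end == buf)
		return -EINVAL;
	return 0;
}

int main(void)
{
	uint64_t mask;

	if (!apply_clock_gating_mask("0xff00f", &mask))
		printf("clock_gating_mask = 0x%" PRIx64 "\n", mask);
	return 0;
}
```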
@ -608,7 +608,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
hdev->in_debug = 0;
if (!hdev->hard_reset_pending)
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
goto out;
}
@ -61,7 +61,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
@ -144,7 +144,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, &result);
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@ -183,7 +183,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@ -204,7 +204,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
sizeof(test_pkt), 0, &result);
if (!rc) {
if (result != ARMCP_PACKET_FENCE_VAL)
@ -248,7 +248,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
sizeof(hb_pkt), 0, &result);
if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
rc = -EIO;
@ -80,6 +80,7 @@
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
@ -98,6 +99,11 @@
#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
BIT(GAUDI_ENGINE_ID_MME_0) |\
BIT(GAUDI_ENGINE_ID_MME_2) |\
GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
[GAUDI_PCI_DMA_1] = 0,
[GAUDI_PCI_DMA_2] = 1,
[GAUDI_PCI_DMA_3] = 5,
[GAUDI_HBM_DMA_1] = 2,
[GAUDI_HBM_DMA_2] = 3,
[GAUDI_HBM_DMA_3] = 4,
[GAUDI_HBM_DMA_4] = 6,
[GAUDI_HBM_DMA_5] = 7
[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
[GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5,
[GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
[GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
[GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6,
[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7
};
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
@ -1819,7 +1825,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_rate_limiter(hdev);
gaudi_disable_clock_gating(hdev);
hdev->asic_funcs->disable_clock_gating(hdev);
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
@ -2531,46 +2537,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev)
WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}
static void gaudi_enable_clock_gating(struct hl_device *hdev)
static void gaudi_set_clock_gating(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 qman_offset;
int i;
if (!hdev->clock_gating)
return;
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)
return;
/* In case we are during debug session, don't enable the clock gate
* as it may interfere
*/
if (hdev->in_debug)
return;
for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i]))))
continue;
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_UPPER_CP_CGM_PWR_GATE_EN);
}
for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i]))))
continue;
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME0_QM_CGM_CFG,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
continue;
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
QMAN_CGM1_PWR_GATE_EN);
WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
@ -2663,7 +2678,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_stop_hbm_dma_qmans(hdev);
gaudi_stop_pci_dma_qmans(hdev);
gaudi_disable_clock_gating(hdev);
hdev->asic_funcs->disable_clock_gating(hdev);
msleep(wait_timeout_ms);
@ -3003,7 +3018,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_tpc_qmans(hdev);
gaudi_enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
gaudi_enable_timestamp(hdev);
@ -3112,7 +3127,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_HBM_DMA | HW_CAP_PLL |
HW_CAP_MMU |
HW_CAP_SRAM_SCRAMBLER |
HW_CAP_HBM_SCRAMBLER);
HW_CAP_HBM_SCRAMBLER |
HW_CAP_CLK_GATE);
memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
}
@ -3463,6 +3480,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
return 0;
}
if (!timeout)
timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
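Across the habanalabs hunks above, callers now pass a timeout of 0 to send_cpu_message(), and each ASIC implementation substitutes its own default when it sees 0. A minimal standalone sketch of that convention (constants and names are illustrative):

```c
#include <stdio.h>

#define DEFAULT_MSG_TIMEOUT_USEC 4000000	/* 4s, per-ASIC default */

/* timeout == 0 means "caller has no opinion, use the ASIC default". */
static int send_cpu_message(const char *msg, long timeout_usec)
{
	if (!timeout_usec)
		timeout_usec = DEFAULT_MSG_TIMEOUT_USEC;

	printf("send '%s' with timeout %ld us\n", msg, timeout_usec);
	return 0;
}

int main(void)
{
	send_cpu_message("heartbeat", 0);	/* default timeout */
	send_cpu_message("slow op", 30000000);	/* caller override */
	return 0;
}
```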
@ -3865,6 +3885,12 @@ static int gaudi_validate_cb(struct hl_device *hdev,
rc = -EPERM;
break;
case PACKET_WREG_BULK:
dev_err(hdev->dev,
"User not allowed to use WREG_BULK\n");
rc = -EPERM;
break;
case PACKET_LOAD_AND_EXE:
rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
(struct packet_load_and_exe *) user_pkt);
@ -3880,7 +3906,6 @@ static int gaudi_validate_cb(struct hl_device *hdev,
break;
case PACKET_WREG_32:
case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_REPEAT:
@ -4521,13 +4546,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
*val = RREG32(addr - CFG_BASE);
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
*val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
@ -4563,13 +4593,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
WREG32(addr - CFG_BASE, val);
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
@ -4605,7 +4640,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
@ -4615,6 +4654,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = (((u64) val_h) << 32) | val_l;
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
*val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
@ -4651,7 +4691,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
@ -4660,6 +4704,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
WREG32(addr + sizeof(u32) - CFG_BASE,
upper_32_bits(val));
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
@ -4881,7 +4926,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
@ -5262,7 +5307,7 @@ static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
}
if (disable_clock_gating) {
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
}
@ -5749,7 +5794,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
/* Clear interrupts */
WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@ -6265,7 +6310,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
if (s)
seq_puts(s, "\n");
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@ -6366,7 +6411,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
dev_err(hdev->dev,
"Timeout while waiting for TPC%d icache prefetch\n",
tpc_id);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
return -EIO;
}
@ -6395,7 +6440,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
1000,
kernel_timeout);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
if (rc) {
@ -6736,7 +6781,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
.send_heartbeat = gaudi_send_heartbeat,
.enable_clock_gating = gaudi_enable_clock_gating,
.set_clock_gating = gaudi_set_clock_gating,
.disable_clock_gating = gaudi_disable_clock_gating,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
@ -88,6 +88,7 @@
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@ -2830,6 +2831,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
return 0;
}
if (!timeout)
timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@ -4431,8 +4435,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
HL_DEVICE_TIMEOUT_USEC, &result);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@ -4464,8 +4468,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, &result);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@ -5028,14 +5032,14 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
static void goya_enable_clock_gating(struct hl_device *hdev)
static void goya_set_clock_gating(struct hl_device *hdev)
{
/* clock gating not supported in Goya */
}
static void goya_disable_clock_gating(struct hl_device *hdev)
{
/* clock gating not supported in Goya */
}
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
@ -5259,7 +5263,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
.enable_clock_gating = goya_enable_clock_gating,
.set_clock_gating = goya_set_clock_gating,
.disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
@ -578,8 +578,9 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
* @enable_clock_gating: enable clock gating for reducing power consumption.
* @disable_clock_gating: disable clock for accessing registers on HBW.
* @set_clock_gating: enable/disable clock gating per engine according to
* clock gating mask in hdev
* @disable_clock_gating: disable clock gating completely
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
@ -587,7 +588,11 @@ enum hl_pll_frequency {
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @send_cpu_message: send buffer to ArmCP.
* @send_cpu_message: send message to F/W. If the message is timedout, the
* driver will eventually reset the device. The timeout can
* be determined by the calling function or it can be 0 and
* then the timeout is the default timeout for the specific
* ASIC
* @get_hw_state: retrieve the H/W state
* @pci_bars_map: Map PCI BARs.
* @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
@ -680,7 +685,7 @@ struct hl_asic_funcs {
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
void (*enable_clock_gating)(struct hl_device *hdev);
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
@ -1398,6 +1403,9 @@ struct hl_device_idle_busy_ts {
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, the driver will restore
* this value and update the F/W after the re-initialization
* @clock_gating_mask: is clock gating enabled. bitmask that represents the
* different engines. See debugfs-driver-habanalabs for
* details.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @cs_active_cnt: number of active command submissions on this device (active
@ -1425,7 +1433,6 @@ struct hl_device_idle_busy_ts {
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @mmu_huge_page_opt: is MMU huge pages optimization enabled.
* @clock_gating: is clock gating enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
* @in_debug: is device under debug. This, together with fpriv_list, enforces
@ -1493,6 +1500,7 @@ struct hl_device {
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
int cs_active_cnt;
@ -1514,7 +1522,6 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 pmmu_huge_range;
u8 init_done;
u8 clock_gating;
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
@ -232,7 +232,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->fw_loading = 1;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
hdev->clock_gating = 1;
hdev->clock_gating_mask = ULONG_MAX;
hdev->reset_pcilink = 0;
hdev->axi_drain = 0;
@ -10,7 +10,6 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
int hl_build_hwmon_channel_info(struct hl_device *hdev,
@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -400,7 +399,7 @@ int hl_get_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -9,9 +9,6 @@
#include <linux/pci.h>
#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct armcp_packet pkt;
@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, &result);
0, &result);
if (rc) {
dev_err(hdev->dev,
@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, &result);
0, &result);
if (rc) {
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
for (div = 1; div < 256; div *= 2) {
for (div = 2; div < 256; div *= 2) {
if ((parent / div) <= clock)
break;
}
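The one-character fix above starts the divider search at 2 rather than 1, apparently because a divisor of 1 is not a usable setting on this controller. A standalone sketch of the resulting search (values in main() are illustrative):

```c
#include <stdio.h>

/* Pick the smallest power-of-two divider (starting at 2, as in the fix)
 * that brings the parent clock at or below the requested rate. */
static unsigned int pick_div(unsigned long parent, unsigned long clock)
{
	unsigned int div;

	for (div = 2; div < 256; div *= 2) {
		if (parent / div <= clock)
			break;
	}
	return div;
}

int main(void)
{
	printf("%u\n", pick_div(200000000, 50000000));	/* 4 */
	printf("%u\n", pick_div(200000000, 200000000));	/* 2: never below 2 */
	return 0;
}
```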
@ -5053,15 +5053,19 @@ int bond_create(struct net *net, const char *name)
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
rtnl_unlock();
return res;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
rtnl_unlock();
if (res < 0)
free_netdev(bond_dev);
return res;
return 0;
}
static int __net_init bond_net_init(struct net *net)
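Both bonding hunks above restructure the paths around register_netdevice() so that carrier and workqueue setup only run after registration has succeeded, with the failure path cleaning up and returning immediately. A standalone sketch of the early-exit shape (all helpers are stand-ins):

```c
#include <stdio.h>

static int register_device(int fail) { return fail ? -1 : 0; }
static void cleanup(void)   { printf("free device\n"); }
static void unlock(void)    { printf("unlock\n"); }
static void late_init(void) { printf("carrier off + init work\n"); }

/* Error path exits before any post-registration setup runs. */
static int create(int fail)
{
	int res = register_device(fail);

	if (res < 0) {
		cleanup();
		unlock();
		return res;
	}

	late_init();	/* only reached on success */
	unlock();
	return 0;
}

int main(void)
{
	create(0);	/* success: late init runs */
	create(1);	/* failure: cleanup and bail */
	return 0;
}
```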
@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
return err;
err = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
if (!err) {
struct bonding *bond = netdev_priv(bond_dev);
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
}
@ -974,23 +974,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
PORT_MIRROR_SNIFFER, false);
}
static void ksz9477_phy_setup(struct ksz_device *dev, int port,
struct phy_device *phy)
{
/* Only apply to port with PHY. */
if (port >= dev->phy_port_cnt)
return;
/* The MAC actually cannot run in 1000 half-duplex mode. */
phy_remove_link_mode(phy,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* PHY does not support gigabit. */
if (!(dev->features & GBIT_SUPPORT))
phy_remove_link_mode(phy,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
}
static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
{
bool gbit;
@ -1603,7 +1586,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.phy_setup = ksz9477_phy_setup,
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
@ -1617,7 +1599,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
int ksz9477_switch_register(struct ksz_device *dev)
{
return ksz_switch_register(dev, &ksz9477_dev_ops);
int ret, i;
struct phy_device *phydev;
ret = ksz_switch_register(dev, &ksz9477_dev_ops);
if (ret)
return ret;
for (i = 0; i < dev->phy_port_cnt; ++i) {
if (!dsa_is_user_port(dev->ds, i))
continue;
phydev = dsa_to_port(dev->ds, i)->slave->phydev;
/* The MAC actually cannot run in 1000 half-duplex mode. */
phy_remove_link_mode(phydev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* PHY does not support gigabit. */
if (!(dev->features & GBIT_SUPPORT))
phy_remove_link_mode(phydev,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
}
return ret;
}
EXPORT_SYMBOL(ksz9477_switch_register);
@ -358,8 +358,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
if (dev->dev_ops->phy_setup)
dev->dev_ops->phy_setup(dev, port, phy);
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
@ -119,8 +119,6 @@ struct ksz_dev_ops {
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*phy_setup)(struct ksz_device *dev, int port,
struct phy_device *phy);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
@ -664,8 +664,11 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port *p;
int err;
p = &chip->ports[port];
/* FIXME: is this the correct test? If we're in fixed mode on an
* internal port, why should we process this any different from
* PHY mode? On the other hand, the port may be automedia between
@ -675,10 +678,14 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
return;
mv88e6xxx_reg_lock(chip);
/* FIXME: should we force the link down here - but if we do, how
* do we restore the link force/unforce state? The driver layering
* gets in the way.
/* In inband mode, the link may come up at any time while the link
* is not forced down. Force the link down while we reconfigure the
* interface mode.
*/
if (mode == MLO_AN_INBAND && p->interface != state->interface &&
chip->info->ops->port_set_link)
chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
err = mv88e6xxx_port_config_interface(chip, port, state->interface);
if (err && err != -EOPNOTSUPP)
goto err_unlock;
@ -691,6 +698,15 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
if (err > 0)
err = 0;
/* Undo the forced down state above after completing configuration
* irrespective of its state on entry, which allows the link to come up.
*/
if (mode == MLO_AN_INBAND && p->interface != state->interface &&
chip->info->ops->port_set_link)
chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
p->interface = state->interface;
err_unlock:
mv88e6xxx_reg_unlock(chip);
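The mv88e6xxx hunk above brackets the interface reconfiguration with a forced-down/unforced pair so an inband link cannot come up while the port registers are in an intermediate state. A standalone sketch of that bracket pattern (enum values mirror the hunk; the helpers are stand-ins):

```c
#include <stdio.h>

enum link_state { LINK_UNFORCED, LINK_FORCED_DOWN };

static void set_link(enum link_state s)
{
	printf("link %s\n", s == LINK_FORCED_DOWN ? "forced down" : "unforced");
}

static void reconfigure(const char *iface)
{
	printf("program interface %s\n", iface);
}

/* Bracket the reconfiguration so an inband link cannot pop up while
 * the port is half-programmed. */
static void mac_config(int inband, int iface_changed, const char *iface)
{
	if (inband && iface_changed)
		set_link(LINK_FORCED_DOWN);

	reconfigure(iface);

	if (inband && iface_changed)
		set_link(LINK_UNFORCED);	/* let the link come up again */
}

int main(void)
{
	mac_config(1, 1, "sgmii");
	return 0;
}
```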
@ -232,6 +232,7 @@ struct mv88e6xxx_port {
u64 atu_full_violation;
u64 vtu_member_violation;
u64 vtu_miss_violation;
phy_interface_t interface;
u8 cmode;
bool mirror_ingress;
bool mirror_egress;
@ -64,6 +64,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
u32 quirks;
u32 priv_data_len;
};
@ -415,6 +415,15 @@ int aq_nic_init(struct aq_nic_s *self)
self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
/* Disable the PTP on NICs where it's known to cause datapath
* problems.
* Ideally this should have been done by PHY provisioning, but
* many units have been shipped with enabled PTP block already.
*/
if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
aq_phy_disable_ptp(self->aq_hw);
}
for (i = 0U; i < self->aq_vecs; i++) {
@ -81,6 +81,8 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
#define AQ_NIC_QUIRK_BAD_PTP BIT(0)
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
* Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2018-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
#include "aq_phy.h"
#define HW_ATL_PTP_DISABLE_MSK BIT(10)
bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
{
int err = 0;
@ -145,3 +149,24 @@ bool aq_phy_init(struct aq_hw_s *aq_hw)
return true;
}
void aq_phy_disable_ptp(struct aq_hw_s *aq_hw)
{
static const u16 ptp_registers[] = {
0x031e,
0x031d,
0x031c,
0x031b,
};
u16 val;
int i;
for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) {
val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1,
ptp_registers[i]);
aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1,
ptp_registers[i],
val & ~HW_ATL_PTP_DISABLE_MSK);
}
}
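aq_phy_disable_ptp() above is a plain read-modify-write loop that clears one PTP control bit across a short list of PHY provisioning registers. A standalone sketch of the same loop shape, against a fake register space (the register numbers and bit come from the hunk; everything else is a stand-in):

```c
#include <stdint.h>
#include <stdio.h>

#define PTP_DISABLE_MSK (1u << 10)	/* BIT(10), as in the hunk */

static uint16_t regs[0x400];		/* fake PHY register space */

static uint16_t phy_read(uint16_t reg)            { return regs[reg]; }
static void phy_write(uint16_t reg, uint16_t val) { regs[reg] = val; }

/* Clear the PTP bit in each provisioning register, read-modify-write. */
static void disable_ptp(void)
{
	static const uint16_t ptp_registers[] = { 0x031e, 0x031d, 0x031c, 0x031b };

	for (unsigned i = 0; i < sizeof(ptp_registers) / sizeof(ptp_registers[0]); i++) {
		uint16_t val = phy_read(ptp_registers[i]);

		phy_write(ptp_registers[i], val & ~PTP_DISABLE_MSK);
	}
}

int main(void)
{
	regs[0x031e] = 0xffff;
	disable_ptp();
	printf("0x%04x\n", regs[0x031e]);	/* bit 10 cleared: 0xfbff */
	return 0;
}
```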
@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* aQuantia Corporation Network Driver
* Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2018-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
#ifndef AQ_PHY_H
@ -29,4 +31,6 @@ bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
bool aq_phy_init(struct aq_hw_s *aq_hw);
void aq_phy_disable_ptp(struct aq_hw_s *aq_hw);
#endif /* AQ_PHY_H */
@ -93,6 +93,25 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
AQ_NIC_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
.quirks = AQ_NIC_QUIRK_BAD_PTP,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
.quirks = AQ_NIC_QUIRK_BAD_PTP,
};
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
@ -354,8 +373,13 @@ static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
/* WSP, if min_rate is set for at least one TC.
* RR otherwise.
*
* NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't
* overwrite it here in that case.
*/
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
if (!nic_cfg->is_ptp)
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
* leave Descriptor TC Arbiter as RR.
*/
@ -18,17 +18,15 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112;
#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111
#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112
extern const struct aq_hw_ops hw_atl_ops_b0;
@ -556,7 +556,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
if (IS_ERR(ag->mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
return PTR_ERR(ag->mdio_reset);
err = PTR_ERR(ag->mdio_reset);
goto mdio_err_put_clk;
}
mii_bus->name = "ag71xx_mdio";
@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
*/
void bnxt_set_ring_params(struct bnxt *bp)
{
u32 ring_size, rx_size, rx_space;
u32 ring_size, rx_size, rx_space, max_rx_cmpl;
u32 agg_factor = 0, agg_ring_size = 0;
/* 8 for CRC and VLAN */
@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
max_rx_cmpl = bp->rx_ring_size;
/* MAX TPA needs to be added because TPA_START completions are
* immediately recycled, so the TPA completions are not bound by
* the RX ring size.
*/
if (bp->flags & BNXT_FLAG_TPA)
max_rx_cmpl += bp->max_tpa;
/* RX and TPA completions are 32-byte, all others are 16-byte */
ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
bp->cp_ring_size = ring_size;
bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
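The comments in the hunk above explain the sizing rule: TPA completions are not bounded by the RX ring size, so max_tpa is added on top of it, and RX/TPA completion entries count double because they are 32 bytes where other completions are 16. A standalone sketch of just that arithmetic (the values in main() are illustrative):

```c
#include <stdio.h>

/* Completion-ring sizing as in the hunk: RX and TPA completions are
 * 32 bytes (count double), aggregation and TX completions 16 bytes. */
static unsigned int cp_ring_size(unsigned int rx, unsigned int tx,
				 unsigned int agg, unsigned int max_tpa,
				 int tpa_enabled)
{
	unsigned int max_rx_cmpl = rx;

	if (tpa_enabled)
		max_rx_cmpl += max_tpa;	/* TPA completions exceed RX ring */

	return max_rx_cmpl * 2 + agg + tx;
}

int main(void)
{
	printf("%u\n", cp_ring_size(2048, 512, 1024, 256, 1)); /* 6144 */
	return 0;
}
```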
@ -10385,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work)
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
&bp->sp_event))
bnxt_init_ethtool_link_settings(bp);
rc = bnxt_update_link(bp, true);
mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
&bp->sp_event))
bnxt_init_ethtool_link_settings(bp);
mutex_unlock(&bp->link_lock);
}
if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
int rc;
@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
if (netif_running(dev))
if (netif_running(dev)) {
mutex_lock(&bp->link_lock);
rc = bnxt_hwrm_set_pause(bp);
mutex_unlock(&bp->link_lock);
}
return rc;
}
@ -543,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
#define VALIDATE_MASK(x) \
bcmgenet_hfb_validate_mask(&(x), sizeof(x))
static int bcmgenet_hfb_insert_data(u32 *f, int offset,
void *val, void *mask, size_t size)
static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
u32 offset, void *val, void *mask,
size_t size)
{
int index;
u32 tmp;
u32 index, tmp;
index = offset / 2;
tmp = f[index];
index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
while (size--) {
if (offset++ & 1) {
@ -567,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
tmp |= 0x10000;
break;
}
f[index++] = tmp;
bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
if (size)
tmp = f[index];
tmp = bcmgenet_hfb_readl(priv,
index * sizeof(u32));
} else {
tmp &= ~0xCFF00;
tmp |= (*(unsigned char *)val++) << 8;
@ -585,44 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
break;
}
if (!size)
f[index] = tmp;
bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
}
}
return 0;
}
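The rework above makes bcmgenet_hfb_insert_data() write value/mask bytes directly into the hardware filter registers instead of a temporary buffer; each 32-bit filter word carries two data bytes, with the offset's parity selecting which half is updated. A simplified standalone sketch of that even/odd packing (the real code also maintains per-byte mask nibbles, omitted here):

```c
#include <stdint.h>
#include <stdio.h>

/* Two data bytes per 32-bit filter word: even offsets fill bits 15:8,
 * odd offsets bits 7:0 (mask bits omitted in this toy version). */
static void insert_data(uint32_t *words, unsigned offset,
			const uint8_t *val, size_t size)
{
	unsigned index = offset / 2;
	uint32_t tmp = words[index];

	while (size--) {
		if (offset++ & 1) {
			tmp &= ~0xffu;
			tmp |= *val++;
			words[index++] = tmp;	/* word complete, flush */
			if (size)
				tmp = words[index];
		} else {
			tmp &= ~0xff00u;
			tmp |= (uint32_t)*val++ << 8;
			if (!size)
				words[index] = tmp;	/* flush partial word */
		}
	}
}

int main(void)
{
	uint32_t words[4] = { 0 };
	uint8_t mac[3] = { 0xaa, 0xbb, 0xcc };

	insert_data(words, 0, mac, sizeof(mac));
	printf("0x%08x 0x%08x\n", words[0], words[1]); /* 0x0000aabb 0x0000cc00 */
	return 0;
}
```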
static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
|
||||
u32 f_length, u32 rx_queue, int f_index)
|
||||
{
|
||||
u32 base = f_index * priv->hw_params->hfb_filter_size;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < f_length; i++)
|
||||
 		bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));

 	bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
 	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
 }

-static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
-					    struct bcmgenet_rxnfc_rule *rule)
+static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
+					     struct bcmgenet_rxnfc_rule *rule)
 {
 	struct ethtool_rx_flow_spec *fs = &rule->fs;
-	int err = 0, offset = 0, f_length = 0;
+	u32 offset = 0, f_length = 0, f;
 	u8 val_8, mask_8;
 	__be16 val_16;
 	u16 mask_16;
 	size_t size;
-	u32 *f_data;
-
-	f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
-			 GFP_KERNEL);
-	if (!f_data)
-		return -ENOMEM;

+	f = fs->location;
 	if (fs->flow_type & FLOW_MAC_EXT) {
-		bcmgenet_hfb_insert_data(f_data, 0,
+		bcmgenet_hfb_insert_data(priv, f, 0,
 					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
 					 sizeof(fs->h_ext.h_dest));
 	}
@@ -630,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 	if (fs->flow_type & FLOW_EXT) {
 		if (fs->m_ext.vlan_etype ||
 		    fs->m_ext.vlan_tci) {
-			bcmgenet_hfb_insert_data(f_data, 12,
+			bcmgenet_hfb_insert_data(priv, f, 12,
 						 &fs->h_ext.vlan_etype,
 						 &fs->m_ext.vlan_etype,
 						 sizeof(fs->h_ext.vlan_etype));
-			bcmgenet_hfb_insert_data(f_data, 14,
+			bcmgenet_hfb_insert_data(priv, f, 14,
 						 &fs->h_ext.vlan_tci,
 						 &fs->m_ext.vlan_tci,
 						 sizeof(fs->h_ext.vlan_tci));
@@ -646,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
 	case ETHER_FLOW:
 		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
-		bcmgenet_hfb_insert_data(f_data, 0,
+		bcmgenet_hfb_insert_data(priv, f, 0,
 					 &fs->h_u.ether_spec.h_dest,
 					 &fs->m_u.ether_spec.h_dest,
 					 sizeof(fs->h_u.ether_spec.h_dest));
-		bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
+		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
 					 &fs->h_u.ether_spec.h_source,
 					 &fs->m_u.ether_spec.h_source,
 					 sizeof(fs->h_u.ether_spec.h_source));
-		bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
 					 &fs->h_u.ether_spec.h_proto,
 					 &fs->m_u.ether_spec.h_proto,
 					 sizeof(fs->h_u.ether_spec.h_proto));
@@ -664,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 		/* Specify IP Ether Type */
 		val_16 = htons(ETH_P_IP);
 		mask_16 = 0xFFFF;
-		bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
 					 &val_16, &mask_16, sizeof(val_16));
-		bcmgenet_hfb_insert_data(f_data, 15 + offset,
+		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
 					 &fs->h_u.usr_ip4_spec.tos,
 					 &fs->m_u.usr_ip4_spec.tos,
 					 sizeof(fs->h_u.usr_ip4_spec.tos));
-		bcmgenet_hfb_insert_data(f_data, 23 + offset,
+		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
 					 &fs->h_u.usr_ip4_spec.proto,
 					 &fs->m_u.usr_ip4_spec.proto,
 					 sizeof(fs->h_u.usr_ip4_spec.proto));
-		bcmgenet_hfb_insert_data(f_data, 26 + offset,
+		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
 					 &fs->h_u.usr_ip4_spec.ip4src,
 					 &fs->m_u.usr_ip4_spec.ip4src,
 					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
-		bcmgenet_hfb_insert_data(f_data, 30 + offset,
+		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
 					 &fs->h_u.usr_ip4_spec.ip4dst,
 					 &fs->m_u.usr_ip4_spec.ip4dst,
 					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
@@ -688,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 		/* Only supports 20 byte IPv4 header */
 		val_8 = 0x45;
 		mask_8 = 0xFF;
-		bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
+		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
 					 &val_8, &mask_8,
 					 sizeof(val_8));
 		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
-		bcmgenet_hfb_insert_data(f_data,
+		bcmgenet_hfb_insert_data(priv, f,
 					 ETH_HLEN + 20 + offset,
 					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
 					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
@@ -701,34 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 		break;
 	}

+	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
 	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
 		/* Ring 0 flows can be handled by the default Descriptor Ring
 		 * We'll map them to ring 0, but don't enable the filter
 		 */
-		bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
-					fs->location);
+		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
 		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
 	} else {
 		/* Other Rx rings are direct mapped here */
-		bcmgenet_hfb_set_filter(priv, f_data, f_length,
-					fs->ring_cookie, fs->location);
-		bcmgenet_hfb_enable_filter(priv, fs->location);
+		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
+							 fs->ring_cookie);
+		bcmgenet_hfb_enable_filter(priv, f);
 		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
 	}
-
-	kfree(f_data);
-
-	return err;
 }
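The hunk above rewrites bcmgenet_hfb_create_rxnfc_filter() to program the hardware filter block in place through the rule's slot index (f = fs->location) instead of staging the filter in a kcalloc()'d shadow image and committing it afterwards. With the allocation gone the function can no longer fail, so it returns void and the callers further down drop their error handling. A condensed sketch of the two models, paraphrased from the hunk (not a compilable excerpt):

	/* Old: stage in memory, then commit -- the allocation can fail. */
	u32 *f_data = kcalloc(priv->hw_params->hfb_filter_size,
			      sizeof(u32), GFP_KERNEL);
	if (!f_data)
		return -ENOMEM;
	bcmgenet_hfb_insert_data(f_data, offset, &val, &mask, len);
	bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f);
	kfree(f_data);

	/* New: write each value/mask pair straight into filter slot 'f'. */
	bcmgenet_hfb_insert_data(priv, f, offset, &val, &mask, len);
	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, rx_queue);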
 /* bcmgenet_hfb_clear
  *
  * Clear Hardware Filter Block and disable all filtering.
  */
+static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+	u32 base, i;
+
+	base = f_index * priv->hw_params->hfb_filter_size;
+	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
+		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
+}
+
 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
 {
 	u32 i;

+	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+		return;
+
 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
@@ -740,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
 		bcmgenet_hfb_reg_writel(priv, 0x0,
 					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

-	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
-			priv->hw_params->hfb_filter_size; i++)
-		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
+		bcmgenet_hfb_clear_filter(priv, i);
 }
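bcmgenet_hfb_clear_filter() factors out the per-filter wipe: each filter occupies hfb_filter_size consecutive 32-bit words of HFB memory starting at word f_index * hfb_filter_size, which is why the old single loop over filter_cnt * filter_size words reduces to one call per filter. A worked example (the size 128 is hypothetical; the real value comes from priv->hw_params):

	/* With hfb_filter_size == 128, filter 3 spans words 384..511,
	 * i.e. byte offsets 3 * 128 * 4 = 1536 through 2047 of HFB RAM.
	 */
	base = f_index * priv->hw_params->hfb_filter_size;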
 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
 {
 	int i;

+	INIT_LIST_HEAD(&priv->rxnfc_list);
 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
 		return;

-	INIT_LIST_HEAD(&priv->rxnfc_list);
 	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
 		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
 		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -1437,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev,
 	loc_rule = &priv->rxnfc_rules[cmd->fs.location];
 	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
 		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
-	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
 		list_del(&loc_rule->list);
+		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+	}
 	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
 	memcpy(&loc_rule->fs, &cmd->fs,
 	       sizeof(struct ethtool_rx_flow_spec));

-	err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
-	if (err) {
-		netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
-			   err);
-		return err;
-	}
+	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);

 	list_add_tail(&loc_rule->list, &priv->rxnfc_list);

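For context: these rules reach the driver from userspace through the ethtool rxnfc interface (ETHTOOL_SRXCLSRLINS / ETHTOOL_SRXCLSRLDEL, e.g. `ethtool -N eth0 flow-type ether dst 00:11:22:33:44:55 action 2`, an illustrative command), where fs.location picks the HFB slot and the action picks the destination ring. A sketch of the per-rule state machine that insert/delete maintain (state names are from the hunks; ordering and comments are interpretation):

	enum bcmgenet_rxnfc_state {
		BCMGENET_RXNFC_STATE_UNUSED,	/* slot free, not on priv->rxnfc_list */
		BCMGENET_RXNFC_STATE_DISABLED,	/* programmed, match not enabled
						 * (ring-0 and wake-only flows) */
		BCMGENET_RXNFC_STATE_ENABLED,	/* programmed and actively steering */
	};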
@@ -1473,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,

 	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
 		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
-	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
 		list_del(&rule->list);
+		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+	}
 	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
 	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));

@@ -3999,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	if (err)
 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (err)
-		goto err;
+		goto err_clk_disable;

 	/* Mii wait queue */
 	init_waitqueue_head(&priv->wq);
@@ -4011,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->clk_wol)) {
 		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
 		err = PTR_ERR(priv->clk_wol);
-		goto err;
+		goto err_clk_disable;
 	}

 	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
 	if (IS_ERR(priv->clk_eee)) {
 		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
 		err = PTR_ERR(priv->clk_eee);
-		goto err;
+		goto err_clk_disable;
 	}

 	/* If this is an internal GPHY, power it on now, before UniMAC is
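These three probe fixes route failures that occur after the main clock was enabled to a label that disables it again, instead of the plain `err` label. This is the standard goto-unwind idiom for probe functions; a self-contained sketch with hypothetical names (not the bcmgenet code):

	/* Hypothetical probe skeleton showing the unwind ordering the hunks
	 * above restore: everything acquired before the failure point is
	 * released, in reverse order, on the way out.
	 */
	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int err;

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		err = clk_prepare_enable(clk);	/* step 1 */
		if (err)
			return err;

		err = -EINVAL;			/* stand-in for a later failing step */
		if (err)
			goto err_clk_disable;	/* must undo step 1, not just return */

		return 0;

	err_clk_disable:
		clk_disable_unprepare(clk);
		return err;
	}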
@@ -4129,8 +4118,9 @@ static int bcmgenet_resume(struct device *d)
 {
 	struct net_device *dev = dev_get_drvdata(d);
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct bcmgenet_rxnfc_rule *rule;
 	unsigned long dma_ctrl;
-	u32 offset, reg;
+	u32 reg;
 	int ret;

 	if (!netif_running(dev))
@@ -4161,10 +4151,11 @@ static int bcmgenet_resume(struct device *d)

 	bcmgenet_set_hw_addr(priv, dev->dev_addr);

-	offset = HFB_FLT_ENABLE_V3PLUS;
-	bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
-	bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
-	bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
+	/* Restore hardware filters */
+	bcmgenet_hfb_clear(priv);
+	list_for_each_entry(rule, &priv->rxnfc_list, list)
+		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+			bcmgenet_hfb_create_rxnfc_filter(priv, rule);

 	if (priv->internal_phy) {
 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -4208,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d)
 {
 	struct net_device *dev = dev_get_drvdata(d);
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	u32 offset;

 	if (!netif_running(dev))
 		return 0;
@@ -4220,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d)
 	if (!device_may_wakeup(d))
 		phy_suspend(dev->phydev);

-	/* Preserve filter state and disable filtering */
-	priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
-	offset = HFB_FLT_ENABLE_V3PLUS;
-	priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
-	priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+	/* Disable filtering */
 	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);

 	return 0;
@@ -696,7 +696,6 @@ struct bcmgenet_priv {
 	u32 wolopts;
 	u8 sopass[SOPASS_MAX];
 	bool wol_active;
-	u32 hfb_en[3];

 	struct bcmgenet_mib_counters mib;

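Taken together, the suspend/resume/header hunks change the filter-preservation strategy: instead of snapshotting three HFB enable registers into hfb_en[] across suspend, resume wipes the block and re-runs the create path for every rule still on priv->rxnfc_list. Re-deriving hardware state from the software rule list also covers cases where the filter data RAM itself does not survive the low-power state, which a register snapshot alone cannot (that rationale is inferred here, not stated in the diff). The restore boils down to:

	bcmgenet_hfb_clear(priv);		/* start from a known-clean slate */
	list_for_each_entry(rule, &priv->rxnfc_list, list)
		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
			bcmgenet_hfb_create_rxnfc_filter(priv, rule);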
@@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,

 	priv->wol_active = 0;
 	clk_disable_unprepare(priv->clk_wol);
+	priv->crc_fwd_en = 0;

 	/* Disable Magic Packet Detection */
-	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
-	reg &= ~(MPD_EN | MPD_PW_EN);
-	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+		reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+		if (!(reg & MPD_EN))
+			return;	/* already reset so skip the rest */
+		reg &= ~(MPD_EN | MPD_PW_EN);
+		bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+	}

 	/* Disable WAKE_FILTER Detection */
-	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
-	reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
-	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+	if (priv->wolopts & WAKE_FILTER) {
+		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+		if (!(reg & RBUF_ACPI_EN))
+			return;	/* already reset so skip the rest */
+		reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
+		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+	}

 	/* Disable CRC Forward */
 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
 	reg &= ~CMD_CRC_FWD;
 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-	priv->crc_fwd_en = 0;
 }
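The WoL power-up path now tears down only the wake sources that were actually armed (per priv->wolopts) and bails out early when the corresponding hardware enable bit is already clear, treating that as "this reset already ran". Moving the crc_fwd_en clear above the early returns keeps the flag from going stale on that path. The guard pattern, condensed from the hunk:

	if (priv->wolopts & WAKE_FILTER) {
		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
		if (!(reg & RBUF_ACPI_EN))
			return;		/* already reset so skip the rest */
		/* ...clear the enable bits and write the register back... */
	}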
@@ -3736,7 +3736,7 @@ static int macb_init(struct platform_device *pdev)

 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
 		val = 0;
-		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+		if (phy_interface_mode_is_rgmii(bp->phy_interface))
 			val = GEM_BIT(RGMII);
 		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
 			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
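The strict equality test missed the RGMII internal-delay variants, so boards using rgmii-id/rgmii-rxid/rgmii-txid never got GEM_BIT(RGMII) set. phy_interface_mode_is_rgmii() covers all four modes; for reference, the helper in include/linux/phy.h is (quoted from memory, worth checking against your tree):

	static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
	{
		return mode >= PHY_INTERFACE_MODE_RGMII &&
		       mode <= PHY_INTERFACE_MODE_RGMII_TXID;
	}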
@@ -2938,6 +2938,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,

 	txq_info = adap->sge.uld_txq_info[tx_uld_type];
 	if (unlikely(!txq_info)) {
 		WARN_ON(true);
+		kfree_skb(skb);
 		return NET_XMIT_DROP;
 	}

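The added kfree_skb() fixes an skb leak: the early bail-out for a missing ULD TX queue previously returned NET_XMIT_DROP without releasing the buffer it was dropping. As a rule of thumb, a path that reports a drop owns the skb and must free it; WARN_ON(true) just emits a stack-trace splat each time the bogus state is hit. Annotated copy of the fixed block (comments are mine):

	if (unlikely(!txq_info)) {
		WARN_ON(true);		/* splat: send attempted before ULD TX queues exist */
		kfree_skb(skb);		/* we own the skb, so dropping means freeing */
		return NET_XMIT_DROP;
	}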
@@ -2938,7 +2938,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 				   DMA_BIT_MASK(40));
 	if (err) {
 		netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
-		return err;
+		goto free_netdev;
 	}

 	/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
@@ -3632,7 +3632,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)

 	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
 	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
-	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+	if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
 		return 0;

 	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
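Switching to IS_ERR_OR_NULL() guards the following dpmac_dev->dev.type dereference against fsl_mc_get_endpoint() returning NULL as well as an ERR_PTR() value (the NULL case being the implied bug). From include/linux/err.h, the two predicates differ only in the NULL check:

	IS_ERR(ptr)		/* true only for ERR_PTR()-encoded errors */
	IS_ERR_OR_NULL(ptr)	/* additionally true when ptr == NULL */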
@@ -906,6 +906,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
 	return 0;

 err_reg_netdev:
+	enetc_mdio_remove(pf);
 	enetc_of_put_phy(priv);
 	enetc_free_msix(priv);
 err_alloc_msix:
@@ -590,6 +590,7 @@ struct fec_enet_private {
 void fec_ptp_init(struct platform_device *pdev, int irq_idx);
 void fec_ptp_stop(struct platform_device *pdev);
 void fec_ptp_start_cyclecounter(struct net_device *ndev);
+void fec_ptp_disable_hwts(struct net_device *ndev);
 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
 int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);

@@ -1294,8 +1294,13 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		ndev->stats.tx_bytes += skb->len;
 	}

-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
-	    fep->bufdesc_ex) {
+	/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+	 * are to time stamp the packet, so we still need to check time
+	 * stamping enabled flag.
+	 */
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+		     fep->hwts_tx_en) &&
+	    fep->bufdesc_ex) {
 		struct skb_shared_hwtstamps shhwtstamps;
 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

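Context for the extra fep->hwts_tx_en test: SKBTX_IN_PROGRESS means some time stamper has claimed the packet, and it is also set when an external (PHY/MII) time stamper does, so the flag alone does not mean the MAC should fill in a time stamp. The fixed condition only stamps when the FEC itself was enabled as the stamper:

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
		     fep->hwts_tx_en) &&	/* MAC is the designated stamper */
	    fep->bufdesc_ex) {
		/* read the TX time stamp back from the extended descriptor */
	}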
@@ -2723,10 +2728,16 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 		return -ENODEV;

 	if (fep->bufdesc_ex) {
-		if (cmd == SIOCSHWTSTAMP)
-			return fec_ptp_set(ndev, rq);
-		if (cmd == SIOCGHWTSTAMP)
-			return fec_ptp_get(ndev, rq);
+		bool use_fec_hwts = !phy_has_hwtstamp(phydev);
+
+		if (cmd == SIOCSHWTSTAMP) {
+			if (use_fec_hwts)
+				return fec_ptp_set(ndev, rq);
+			fec_ptp_disable_hwts(ndev);
+		} else if (cmd == SIOCGHWTSTAMP) {
+			if (use_fec_hwts)
+				return fec_ptp_get(ndev, rq);
+		}
 	}

 	return phy_mii_ioctl(phydev, rq, cmd);
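The rework gives an attached PHY time stamper priority: when phy_has_hwtstamp() is true, the HWTSTAMP ioctls fall through to phy_mii_ioctl() so the PHY handles them, and on SIOCSHWTSTAMP the MAC's own stamping is switched off via the new fec_ptp_disable_hwts() so the two blocks never both stamp a packet. Condensed sketch (real names, abbreviated structure):

	if (fep->bufdesc_ex && cmd == SIOCSHWTSTAMP) {
		if (!phy_has_hwtstamp(phydev))
			return fec_ptp_set(ndev, rq);	/* MAC does the stamping */
		fec_ptp_disable_hwts(ndev);		/* PHY will; quiesce the MAC */
	}
	return phy_mii_ioctl(phydev, rq, cmd);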
@@ -452,6 +452,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
 	return -EOPNOTSUPP;
 }

+/**
+ * fec_ptp_disable_hwts - disable hardware time stamping
+ * @ndev: pointer to net_device
+ */
+void fec_ptp_disable_hwts(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	fep->hwts_tx_en = 0;
+	fep->hwts_rx_en = 0;
+}
+
 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -779,8 +779,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)

 	mac_addr = of_get_mac_address(np);

-	if (!IS_ERR(mac_addr))
+	if (!IS_ERR(mac_addr)) {
 		ether_addr_copy(dev->dev_addr, mac_addr);
+	} else {
+		eth_hw_addr_random(dev);
+		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
+	}

 	if (model && !strcasecmp(model, "TSEC"))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
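Previously a missing or malformed device-tree MAC address left dev->dev_addr all zeroes; the fallback now produces a usable address instead. eth_hw_addr_random() assigns a locally administered random unicast address and records the provenance by setting addr_assign_type to NET_ADDR_RANDOM, so userspace can tell the address is not stable across reboots:

	mac_addr = of_get_mac_address(np);	/* DT "mac-address"/"local-mac-address" */
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(dev);	/* random addr + NET_ADDR_RANDOM provenance */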
@@ -77,6 +77,7 @@
 	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)

 enum hns_desc_type {
+	DESC_TYPE_UNKNOWN,
 	DESC_TYPE_SKB,
 	DESC_TYPE_FRAGLIST_SKB,
 	DESC_TYPE_PAGE,
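Placing DESC_TYPE_UNKNOWN first gives the enum an explicit "not initialized" value of 0, so zeroed descriptor bookkeeping no longer aliases DESC_TYPE_SKB, which was implicitly 0 before (the aliasing rationale is the usual reason for such a sentinel and is inferred here, not spelled out in the hunk):

	enum hns_desc_type {
		DESC_TYPE_UNKNOWN,	/* = 0: matches memset()-cleared state */
		DESC_TYPE_SKB,		/* now 1: distinct from zeroed memory */
		DESC_TYPE_FRAGLIST_SKB,
		DESC_TYPE_PAGE,
	};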