Merge remote-tracking branch 'torvalds/master' into perf/core

To pick up the fixes that went upstream via acme/perf/urgent and to get to v5.19.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
.mailmap
@@ -60,6 +60,10 @@ Arnd Bergmann <arnd@arndb.de>
 Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
@@ -3176,6 +3176,7 @@
				no_entry_flush [PPC]
				no_uaccess_flush [PPC]
				mmio_stale_data=off [X86]
+				retbleed=off [X86]

			Exceptions:
			       This does not have any effect on
@@ -3198,6 +3199,7 @@
				mds=full,nosmt [X86]
				tsx_async_abort=full,nosmt [X86]
				mmio_stale_data=full,nosmt [X86]
+				retbleed=auto,nosmt [X86]

	mminit_loglevel=
			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -167,70 +167,65 @@ properties:
          - in-band-status

  fixed-link:
-    allOf:
-      - if:
-          type: array
-        then:
-          deprecated: true
-          items:
-            - minimum: 0
-              maximum: 31
-              description:
-                Emulated PHY ID, choose any but unique to the all
-                specified fixed-links
-
-            - enum: [0, 1]
-              description:
-                Duplex configuration. 0 for half duplex or 1 for
-                full duplex
-
-            - enum: [10, 100, 1000, 2500, 10000]
-              description:
-                Link speed in Mbits/sec.
-
-            - enum: [0, 1]
-              description:
-                Pause configuration. 0 for no pause, 1 for pause
-
-            - enum: [0, 1]
-              description:
-                Asymmetric pause configuration. 0 for no asymmetric
-                pause, 1 for asymmetric pause
-
-      - if:
-          type: object
-        then:
-          properties:
-            speed:
-              description:
-                Link speed.
-              $ref: /schemas/types.yaml#/definitions/uint32
-              enum: [10, 100, 1000, 2500, 10000]
-
-            full-duplex:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that full-duplex is used. When absent, half
-                duplex is assumed.
-
-            pause:
-              $ref: /schemas/types.yaml#definitions/flag
-              description:
-                Indicates that pause should be enabled.
-
-            asym-pause:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that asym_pause should be enabled.
-
-            link-gpios:
-              maxItems: 1
-              description:
-                GPIO to determine if the link is up
-
-          required:
-            - speed
+    oneOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+        deprecated: true
+        items:
+          - minimum: 0
+            maximum: 31
+            description:
+              Emulated PHY ID, choose any but unique to the all
+              specified fixed-links
+
+          - enum: [0, 1]
+            description:
+              Duplex configuration. 0 for half duplex or 1 for
+              full duplex
+
+          - enum: [10, 100, 1000, 2500, 10000]
+            description:
+              Link speed in Mbits/sec.
+
+          - enum: [0, 1]
+            description:
+              Pause configuration. 0 for no pause, 1 for pause
+
+          - enum: [0, 1]
+            description:
+              Asymmetric pause configuration. 0 for no asymmetric
+              pause, 1 for asymmetric pause
+
+      - type: object
+        additionalProperties: false
+        properties:
+          speed:
+            description:
+              Link speed.
+            $ref: /schemas/types.yaml#/definitions/uint32
+            enum: [10, 100, 1000, 2500, 10000]
+
+          full-duplex:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that full-duplex is used. When absent, half
+              duplex is assumed.
+
+          pause:
+            $ref: /schemas/types.yaml#definitions/flag
+            description:
+              Indicates that pause should be enabled.
+
+          asym-pause:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that asym_pause should be enabled.
+
+          link-gpios:
+            maxItems: 1
+            description:
+              GPIO to determine if the link is up
+
+        required:
+          - speed

  additionalProperties: true
@@ -183,6 +183,7 @@ properties:
      Should specify the gpio for phy reset.

  phy-reset-duration:
    $ref: /schemas/types.yaml#/definitions/uint32
+    deprecated: true
    description:
      Reset duration in milliseconds. Should present only if property
@@ -191,12 +192,14 @@ properties:
      and 1 millisecond will be used instead.

  phy-reset-active-high:
    type: boolean
+    deprecated: true
    description:
      If present then the reset sequence using the GPIO specified in the
      "phy-reset-gpios" property is reversed (H=reset state, L=operation state).

  phy-reset-post-delay:
    $ref: /schemas/types.yaml#/definitions/uint32
+    deprecated: true
    description:
      Post reset delay in milliseconds. If present then a delay of phy-reset-post-delay
@@ -2866,7 +2866,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
	Default: 4K

sctp_wmem - vector of 3 INTEGERs: min, default, max
-	Currently this tunable has no effect.
+	Only the first value ("min") is used, "default" and "max" are
+	ignored.
+
+	min: Minimum size of send buffer that can be used by SCTP sockets.
+	It is guaranteed to each SCTP socket (but not association) even
+	under moderate memory pressure.
+
+	Default: 4K

addr_scope_policy - INTEGER
	Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
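As a quick way to inspect the three values this text describes, here is a small stand-alone user-space sketch (not kernel code; it only assumes the standard procfs location of this sysctl):

#include <stdio.h>

int main(void)
{
	/* Standard procfs path for the tunable discussed above. */
	FILE *f = fopen("/proc/sys/net/sctp/sctp_wmem", "r");
	int min, def, max;

	if (!f || fscanf(f, "%d %d %d", &min, &def, &max) != 3) {
		perror("sctp_wmem");
		return 1;
	}
	/* Per the documentation change, only "min" is honoured. */
	printf("min=%d (used)  default=%d  max=%d (ignored)\n", min, def, max);
	fclose(f);
	return 0;
}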
Makefile

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Superb Owl

 # *DOCUMENTATION*
@@ -38,7 +38,7 @@
	sys_clk: sys_clk {
		compatible = "fixed-clock";
		#clock-cells = <0>;
-		clock-frequency = <162500000>;
+		clock-frequency = <165625000>;
	};

	cpu_clk: cpu_clk {
@@ -10,7 +10,7 @@
#else
#define MAX_DMA_ADDRESS	({ \
	extern phys_addr_t arm_dma_zone_size; \
-	arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
+	arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
	(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
#endif
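The bug fixed here is a dropped hex digit: 0x10000000 is 256 MiB, while the intended bound is 4 GiB. A trivial stand-alone check of the two constants:

#include <stdio.h>

int main(void)
{
	/* Old (wrong) bound vs. corrected bound from the hunk above. */
	printf("0x10000000  = %llu MiB\n", 0x10000000ULL >> 20);   /* 256 */
	printf("0x100000000 = %llu MiB\n", 0x100000000ULL >> 20);  /* 4096 */
	return 0;
}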
@@ -40,8 +40,8 @@ ENDPROC(_find_first_zero_bit_le)
 * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
 */
ENTRY(_find_next_zero_bit_le)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)

@@ -81,8 +81,8 @@ ENDPROC(_find_first_bit_le)
 * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
 */
ENTRY(_find_next_bit_le)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
 ARM(		ldrb	r3, [r0, r2, lsr #3]	)

@@ -115,8 +115,8 @@ ENTRY(_find_first_zero_bit_be)
ENDPROC(_find_first_zero_bit_be)

ENTRY(_find_next_zero_bit_be)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
		eor	r3, r2, #0x18		@ big endian byte ordering

@@ -149,8 +149,8 @@ ENTRY(_find_first_bit_be)
ENDPROC(_find_first_bit_be)

ENTRY(_find_next_bit_be)
-		teq	r1, #0
-		beq	3b
+		cmp	r2, r1
+		bhs	3b
		ands	ip, r2, #7
		beq	1b			@ If new byte, goto old routine
		eor	r3, r2, #0x18		@ big endian byte ordering
@@ -549,7 +549,7 @@ static struct pxa2xx_spi_controller corgi_spi_info = {
};

static struct gpiod_lookup_table corgi_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.1",
+	.dev_id = "spi1",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),

@@ -635,7 +635,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
};

static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_HX4700_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
		{ },

@@ -140,7 +140,7 @@ struct platform_device pxa_spi_ssp4 = {
};

static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
-	.dev_id = "pxa2xx-spi.3",
+	.dev_id = "spi3",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS1, "cs", 0, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS2, "cs", 1, GPIO_ACTIVE_LOW),
@@ -149,7 +149,7 @@ static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
};

static struct gpiod_lookup_table pxa_ssp4_gpio_table = {
-	.dev_id = "pxa2xx-spi.4",
+	.dev_id = "spi4",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS3, "cs", 0, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS4, "cs", 1, GPIO_ACTIVE_LOW),

@@ -207,7 +207,7 @@ static struct spi_board_info littleton_spi_devices[] __initdata = {
};

static struct gpiod_lookup_table littleton_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", LITTLETON_GPIO_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
		{ },

@@ -994,7 +994,7 @@ static struct pxa2xx_spi_controller magician_spi_info = {
};

static struct gpiod_lookup_table magician_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
	.table = {
		/* NOTICE must be GPIO, incompatibility with hw PXA SPI framing */
		GPIO_LOOKUP_IDX("gpio-pxa", GPIO14_MAGICIAN_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),

@@ -578,7 +578,7 @@ static struct pxa2xx_spi_controller spitz_spi_info = {
};

static struct gpiod_lookup_table spitz_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),

@@ -623,7 +623,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
};

static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
-	.dev_id = "pxa2xx-spi.1",
+	.dev_id = "spi1",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", GPIO24_ZIPITZ2_WIFI_CS, "cs", 0, GPIO_ACTIVE_LOW),
		{ },
@@ -631,7 +631,7 @@ static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
};

static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
	.table = {
		GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_ZIPITZ2_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
		{ },
@@ -69,7 +69,6 @@ config LOONGARCH
	select GENERIC_TIME_VSYSCALL
	select GPIOLIB
	select HAVE_ARCH_AUDITSYSCALL
-	select HAVE_ARCH_COMPILER_H
	select HAVE_ARCH_MMAP_RND_BITS if MMU
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_TRACEHOOK
@@ -274,16 +274,4 @@
	nor	\dst, \src, zero
	.endm

-	.macro	bgt r0 r1 label
-	blt	\r1, \r0, \label
-	.endm
-
-	.macro	bltz r0 label
-	blt	\r0, zero, \label
-	.endm
-
-	.macro	bgez r0 label
-	bge	\r0, zero, \label
-	.endm
-
#endif /* _ASM_ASMMACRO_H */
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-#include <asm/compiler.h>

#if __SIZEOF_LONG__ == 4
#define __LL		"ll.w	"
@@ -157,27 +156,25 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	addi.w	%0, %1, %3				\n"
-		"	or	%1, %0, $zero				\n"
-		"	blt	%0, $zero, 2f				\n"
+		"	move	%1, %0					\n"
+		"	bltz	%0, 2f					\n"
		"	sc.w	%1, %2					\n"
-		"	beq	$zero, %1, 1b				\n"
+		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	sub.w	%0, %1, %3				\n"
-		"	or	%1, %0, $zero				\n"
-		"	blt	%0, $zero, 2f				\n"
+		"	move	%1, %0					\n"
+		"	bltz	%0, 2f					\n"
		"	sc.w	%1, %2					\n"
-		"	beq	$zero, %1, 1b				\n"
+		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}
@@ -320,27 +317,25 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	addi.d	%0, %1, %3				\n"
-		"	or	%1, %0, $zero				\n"
-		"	blt	%0, $zero, 2f				\n"
+		"	move	%1, %0					\n"
+		"	bltz	%0, 2f					\n"
		"	sc.d	%1, %2					\n"
-		"	beq	%1, $zero, 1b				\n"
+		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
		"	sub.d	%0, %1, %3				\n"
-		"	or	%1, %0, $zero				\n"
-		"	blt	%0, $zero, 2f				\n"
+		"	move	%1, %0					\n"
+		"	bltz	%0, 2f					\n"
		"	sc.d	%1, %2					\n"
-		"	beq	%1, $zero, 1b				\n"
+		"	beqz	%1, 1b					\n"
		"2:							\n"
		__WEAK_LLSC_MB
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}
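For readers unfamiliar with LL/SC, the loops above implement "subtract, but only commit when the result stays non-negative; return the new value either way". A portable C11 sketch of the same semantics (an illustration only, not the kernel's implementation, which must use inline assembly):

#include <stdatomic.h>

static int sub_if_positive_sketch(_Atomic int *v, int i)
{
	int old = atomic_load(v);
	int new;

	do {
		new = old - i;
		if (new < 0)
			return new;	/* would go negative: do not store */
	} while (!atomic_compare_exchange_weak(v, &old, new));

	return new;			/* stored value */
}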
@@ -48,9 +48,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
	__asm__ __volatile__(
		"sltu	%0, %1, %2\n\t"
#if (__SIZEOF_LONG__ == 4)
-		"sub.w	%0, $r0, %0\n\t"
+		"sub.w	%0, $zero, %0\n\t"
#elif (__SIZEOF_LONG__ == 8)
-		"sub.d	%0, $r0, %0\n\t"
+		"sub.d	%0, $zero, %0\n\t"
#endif
		: "=r" (mask)
		: "r" (index), "r" (size)
@@ -55,9 +55,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
	__asm__ __volatile__(						\
	"1:	" ld "	%0, %2		# __cmpxchg_asm \n"		\
	"	bne	%0, %z3, 2f			\n"		\
-	"	or	$t0, %z4, $zero			\n"		\
+	"	move	$t0, %z4			\n"		\
	"	" st "	$t0, %1				\n"		\
-	"	beq	$zero, $t0, 1b			\n"		\
+	"	beqz	$t0, 1b				\n"		\
	"2:						\n"		\
	__WEAK_LLSC_MB							\
	: "=&r" (__ret), "=ZB"(*m)					\
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_COMPILER_H
-#define _ASM_COMPILER_H
-
-#define GCC_OFF_SMALL_ASM() "ZC"
-
-#define LOONGARCH_ISA_LEVEL "loongarch"
-#define LOONGARCH_ISA_ARCH_LEVEL "arch=loongarch"
-#define LOONGARCH_ISA_LEVEL_RAW loongarch
-#define LOONGARCH_ISA_ARCH_LEVEL_RAW LOONGARCH_ISA_LEVEL_RAW
-
-#endif /* _ASM_COMPILER_H */
@@ -288,8 +288,6 @@ struct arch_elf_state {
	.interp_fp_abi = LOONGARCH_ABI_FP_ANY,	\
}

-#define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
-
extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
			    bool is_interp, struct arch_elf_state *state);
@@ -8,7 +8,6 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/barrier.h>
-#include <asm/compiler.h>
#include <asm/errno.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
@@ -17,7 +16,7 @@
	"1:	ll.w	%1, %4 # __futex_atomic_op\n"		\
	"	" insn	"				\n"	\
	"2:	sc.w	$t0, %2				\n"	\
-	"	beq	$t0, $zero, 1b			\n"	\
+	"	beqz	$t0, 1b				\n"	\
	"3:						\n"	\
	"	.section .fixup,\"ax\"			\n"	\
	"4:	li.w	%0, %6				\n"	\
@@ -82,9 +81,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
	"# futex_atomic_cmpxchg_inatomic			\n"
	"1:	ll.w	%1, %3					\n"
	"	bne	%1, %z4, 3f				\n"
-	"	or	$t0, %z5, $zero				\n"
+	"	move	$t0, %z5				\n"
	"2:	sc.w	$t0, %2					\n"
-	"	beq	$zero, $t0, 1b				\n"
+	"	beqz	$t0, 1b					\n"
	"3:							\n"
	__WEAK_LLSC_MB
	"	.section .fixup,\"ax\"				\n"
@@ -95,8 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
	"	"__UA_ADDR "\t1b, 4b				\n"
	"	"__UA_ADDR "\t2b, 4b				\n"
	"	.previous					\n"
-	: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
-	: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+	: "+r" (ret), "=&r" (val), "=ZC" (*uaddr)
+	: "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval),
	  "i" (-EFAULT)
	: "memory", "t0");
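Stripped of the user-access and exception-fixup machinery, the routine above has conventional compare-and-swap semantics. A hedged C11 sketch (illustration only; the real code operates on a user pointer and returns -EFAULT on faults):

#include <stdatomic.h>
#include <stdint.h>

static int futex_cmpxchg_sketch(uint32_t *uval, _Atomic uint32_t *uaddr,
				uint32_t oldval, uint32_t newval)
{
	uint32_t cur = oldval;

	/* Store newval only if *uaddr still holds oldval. */
	atomic_compare_exchange_strong(uaddr, &cur, newval);
	*uval = cur;	/* value actually observed at uaddr */
	return 0;	/* the real code returns -EFAULT on a fault */
}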
@@ -9,7 +9,6 @@

#include <linux/compiler.h>
#include <linux/stringify.h>
-#include <asm/compiler.h>
#include <asm/loongarch.h>

static inline void arch_local_irq_enable(void)
@@ -9,7 +9,6 @@
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/cmpxchg.h>
-#include <asm/compiler.h>

typedef struct {
	atomic_long_t a;
@@ -39,18 +39,6 @@ extern const struct plat_smp_ops loongson3_smp_ops;

#define MAX_PACKAGES 16

-/* Chip Config register of each physical cpu package */
-extern u64 loongson_chipcfg[MAX_PACKAGES];
-#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
-
-/* Chip Temperature register of each physical cpu package */
-extern u64 loongson_chiptemp[MAX_PACKAGES];
-#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
-
-/* Freq Control register of each physical cpu package */
-extern u64 loongson_freqctrl[MAX_PACKAGES];
-#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
-
#define xconf_readl(addr) readl(addr)
#define xconf_readq(addr) readq(addr)

@@ -58,7 +46,7 @@ static inline void xconf_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile (
	"	st.w	%[v], %[hw], 0	\n"
-	"	ld.b	$r0, %[hw], 0	\n"
+	"	ld.b	$zero, %[hw], 0	\n"
	:
	: [hw] "r" (addr), [v] "r" (val)
	);
@@ -68,7 +56,7 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
{
	asm volatile (
	"	st.d	%[v], %[hw], 0	\n"
-	"	ld.b	$r0, %[hw], 0	\n"
+	"	ld.b	$zero, %[hw], 0	\n"
	:
	: [hw] "r" (addr), [v] "r" (val64)
	);
@@ -23,13 +23,13 @@
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
	__asm__ __volatile__(
-		/* Save $r1 */
+		/* Save $ra */
		STORE_ONE_REG(1)
-		/* Use $r1 to save PC */
-		"pcaddi	$r1, 0\n\t"
-		STR_LONG_S " $r1, %0\n\t"
-		/* Restore $r1 */
-		STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+		/* Use $ra to save PC */
+		"pcaddi	$ra, 0\n\t"
+		STR_LONG_S " $ra, %0\n\t"
+		/* Restore $ra */
+		STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t"
		STORE_ONE_REG(2)
		STORE_ONE_REG(3)
		STORE_ONE_REG(4)
@@ -44,14 +44,14 @@ struct thread_info {
}

/* How to get the thread information struct from C. */
-register struct thread_info *__current_thread_info __asm__("$r2");
+register struct thread_info *__current_thread_info __asm__("$tp");

static inline struct thread_info *current_thread_info(void)
{
	return __current_thread_info;
}

-register unsigned long current_stack_pointer __asm__("$r3");
+register unsigned long current_stack_pointer __asm__("$sp");

#endif /* !__ASSEMBLY__ */
@@ -162,7 +162,7 @@ do {									\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li.w	%0, %3					\n"	\
-	"	or	%1, $r0, $r0				\n"	\
+	"	move	%1, $zero				\n"	\
	"	b	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
@@ -4,8 +4,9 @@
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
-#include <asm/cpu-info.h>
 #include <linux/cacheinfo.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu-info.h>

/* Populates leaf and increments to next leaf */
#define populate_cache(cache, leaf, c_level, c_type)		\
@@ -17,6 +18,8 @@ do {								\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		     c->cache.ways;				\
+	if (leaf->level > 2)					\
+		leaf->size *= nodes_per_package;		\
	leaf++;							\
} while (0)

@@ -95,11 +98,15 @@ static void cache_cpumap_setup(unsigned int cpu)

int populate_cache_leaves(unsigned int cpu)
{
-	int level = 1;
+	int level = 1, nodes_per_package = 1;
	struct cpuinfo_loongarch *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;

+	if (loongson_sysconf.nr_nodes > 1)
+		nodes_per_package = loongson_sysconf.cores_per_package
+			/ loongson_sysconf.cores_per_node;
+
	if (c->icache.waysize) {
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
@@ -27,7 +27,7 @@ SYM_FUNC_START(handle_syscall)

	addi.d	sp, sp, -PT_SIZE
	cfi_st	t2, PT_R3
	cfi_rel_offset	sp, PT_R3
	st.d	zero, sp, PT_R0
	csrrd	t2, LOONGARCH_CSR_PRMD
	st.d	t2, sp, PT_PRMD
@@ -50,7 +50,7 @@ SYM_FUNC_START(handle_syscall)
	cfi_st	a7, PT_R11
	csrrd	ra, LOONGARCH_CSR_ERA
	st.d	ra, sp, PT_ERA
	cfi_rel_offset	ra, PT_ERA

	cfi_st	tp, PT_R2
	cfi_st	u0, PT_R21
@@ -17,21 +17,6 @@ u64 efi_system_table;
struct loongson_system_configuration loongson_sysconf;
EXPORT_SYMBOL(loongson_sysconf);

-u64 loongson_chipcfg[MAX_PACKAGES];
-u64 loongson_chiptemp[MAX_PACKAGES];
-u64 loongson_freqctrl[MAX_PACKAGES];
-unsigned long long smp_group[MAX_PACKAGES];
-
-static void __init register_addrs_set(u64 *registers, const u64 addr, int num)
-{
-	u64 i;
-
-	for (i = 0; i < num; i++) {
-		*registers = (i << 44) | addr;
-		registers++;
-	}
-}
-
void __init init_environ(void)
{
	int efi_boot = fw_arg0;
@@ -50,11 +35,6 @@ void __init init_environ(void)
	efi_memmap_init_early(&data);
	memblock_reserve(data.phys_map & PAGE_MASK,
			 PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
-
-	register_addrs_set(smp_group, TO_UNCACHE(0x1fe01000), 16);
-	register_addrs_set(loongson_chipcfg, TO_UNCACHE(0x1fe00180), 16);
-	register_addrs_set(loongson_chiptemp, TO_UNCACHE(0x1fe0019c), 16);
-	register_addrs_set(loongson_freqctrl, TO_UNCACHE(0x1fe001d0), 16);
}

static int __init init_cpu_fullname(void)
@@ -27,78 +27,78 @@
	.endm

	.macro	sc_save_fp base
	EX	fst.d $f0,  \base, (0 * FPU_REG_WIDTH)
	EX	fst.d $f1,  \base, (1 * FPU_REG_WIDTH)
	EX	fst.d $f2,  \base, (2 * FPU_REG_WIDTH)
	EX	fst.d $f3,  \base, (3 * FPU_REG_WIDTH)
	EX	fst.d $f4,  \base, (4 * FPU_REG_WIDTH)
	EX	fst.d $f5,  \base, (5 * FPU_REG_WIDTH)
	EX	fst.d $f6,  \base, (6 * FPU_REG_WIDTH)
	EX	fst.d $f7,  \base, (7 * FPU_REG_WIDTH)
	EX	fst.d $f8,  \base, (8 * FPU_REG_WIDTH)
	EX	fst.d $f9,  \base, (9 * FPU_REG_WIDTH)
	EX	fst.d $f10, \base, (10 * FPU_REG_WIDTH)
	EX	fst.d $f11, \base, (11 * FPU_REG_WIDTH)
	EX	fst.d $f12, \base, (12 * FPU_REG_WIDTH)
	EX	fst.d $f13, \base, (13 * FPU_REG_WIDTH)
	EX	fst.d $f14, \base, (14 * FPU_REG_WIDTH)
	EX	fst.d $f15, \base, (15 * FPU_REG_WIDTH)
	EX	fst.d $f16, \base, (16 * FPU_REG_WIDTH)
	EX	fst.d $f17, \base, (17 * FPU_REG_WIDTH)
	EX	fst.d $f18, \base, (18 * FPU_REG_WIDTH)
	EX	fst.d $f19, \base, (19 * FPU_REG_WIDTH)
	EX	fst.d $f20, \base, (20 * FPU_REG_WIDTH)
	EX	fst.d $f21, \base, (21 * FPU_REG_WIDTH)
	EX	fst.d $f22, \base, (22 * FPU_REG_WIDTH)
	EX	fst.d $f23, \base, (23 * FPU_REG_WIDTH)
	EX	fst.d $f24, \base, (24 * FPU_REG_WIDTH)
	EX	fst.d $f25, \base, (25 * FPU_REG_WIDTH)
	EX	fst.d $f26, \base, (26 * FPU_REG_WIDTH)
	EX	fst.d $f27, \base, (27 * FPU_REG_WIDTH)
	EX	fst.d $f28, \base, (28 * FPU_REG_WIDTH)
	EX	fst.d $f29, \base, (29 * FPU_REG_WIDTH)
	EX	fst.d $f30, \base, (30 * FPU_REG_WIDTH)
	EX	fst.d $f31, \base, (31 * FPU_REG_WIDTH)
	.endm

	.macro	sc_restore_fp base
	EX	fld.d $f0,  \base, (0 * FPU_REG_WIDTH)
	EX	fld.d $f1,  \base, (1 * FPU_REG_WIDTH)
	EX	fld.d $f2,  \base, (2 * FPU_REG_WIDTH)
	EX	fld.d $f3,  \base, (3 * FPU_REG_WIDTH)
	EX	fld.d $f4,  \base, (4 * FPU_REG_WIDTH)
	EX	fld.d $f5,  \base, (5 * FPU_REG_WIDTH)
	EX	fld.d $f6,  \base, (6 * FPU_REG_WIDTH)
	EX	fld.d $f7,  \base, (7 * FPU_REG_WIDTH)
	EX	fld.d $f8,  \base, (8 * FPU_REG_WIDTH)
	EX	fld.d $f9,  \base, (9 * FPU_REG_WIDTH)
	EX	fld.d $f10, \base, (10 * FPU_REG_WIDTH)
	EX	fld.d $f11, \base, (11 * FPU_REG_WIDTH)
	EX	fld.d $f12, \base, (12 * FPU_REG_WIDTH)
	EX	fld.d $f13, \base, (13 * FPU_REG_WIDTH)
	EX	fld.d $f14, \base, (14 * FPU_REG_WIDTH)
	EX	fld.d $f15, \base, (15 * FPU_REG_WIDTH)
	EX	fld.d $f16, \base, (16 * FPU_REG_WIDTH)
	EX	fld.d $f17, \base, (17 * FPU_REG_WIDTH)
	EX	fld.d $f18, \base, (18 * FPU_REG_WIDTH)
	EX	fld.d $f19, \base, (19 * FPU_REG_WIDTH)
	EX	fld.d $f20, \base, (20 * FPU_REG_WIDTH)
	EX	fld.d $f21, \base, (21 * FPU_REG_WIDTH)
	EX	fld.d $f22, \base, (22 * FPU_REG_WIDTH)
	EX	fld.d $f23, \base, (23 * FPU_REG_WIDTH)
	EX	fld.d $f24, \base, (24 * FPU_REG_WIDTH)
	EX	fld.d $f25, \base, (25 * FPU_REG_WIDTH)
	EX	fld.d $f26, \base, (26 * FPU_REG_WIDTH)
	EX	fld.d $f27, \base, (27 * FPU_REG_WIDTH)
	EX	fld.d $f28, \base, (28 * FPU_REG_WIDTH)
	EX	fld.d $f29, \base, (29 * FPU_REG_WIDTH)
	EX	fld.d $f30, \base, (30 * FPU_REG_WIDTH)
	EX	fld.d $f31, \base, (31 * FPU_REG_WIDTH)
	.endm

	.macro sc_save_fcc base, tmp0, tmp1
	movcf2gr	\tmp0, $fcc0
	move		\tmp1, \tmp0
	movcf2gr	\tmp0, $fcc1
	bstrins.d	\tmp1, \tmp0, 15, 8
	movcf2gr	\tmp0, $fcc2
@@ -113,11 +113,11 @@
	bstrins.d	\tmp1, \tmp0, 55, 48
	movcf2gr	\tmp0, $fcc7
	bstrins.d	\tmp1, \tmp0, 63, 56
	EX	st.d \tmp1, \base, 0
	.endm

	.macro sc_restore_fcc base, tmp0, tmp1
	EX	ld.d \tmp0, \base, 0
	bstrpick.d	\tmp1, \tmp0, 7, 0
	movgr2cf	$fcc0, \tmp1
	bstrpick.d	\tmp1, \tmp0, 15, 8
@@ -138,11 +138,11 @@

	.macro sc_save_fcsr base, tmp0
	movfcsr2gr	\tmp0, fcsr0
	EX	st.w \tmp0, \base, 0
	.endm

	.macro sc_restore_fcsr base, tmp0
	EX	ld.w \tmp0, \base, 0
	movgr2fcsr	fcsr0, \tmp0
	.endm

@@ -151,9 +151,9 @@
 */
SYM_FUNC_START(_save_fp)
	fpu_save_csr	a0 t1
	fpu_save_double	a0 t1		# clobbers t1
	fpu_save_cc	a0 t1 t2	# clobbers t1, t2
-	jirl	zero, ra, 0
+	jr	ra
SYM_FUNC_END(_save_fp)
EXPORT_SYMBOL(_save_fp)

@@ -161,10 +161,10 @@ EXPORT_SYMBOL(_save_fp)
 * Restore a thread's fp context.
 */
SYM_FUNC_START(_restore_fp)
	fpu_restore_double	a0 t1		# clobbers t1
	fpu_restore_csr		a0 t1
	fpu_restore_cc		a0 t1 t2	# clobbers t1, t2
-	jirl	zero, ra, 0
+	jr	ra
SYM_FUNC_END(_restore_fp)

/*
@@ -216,7 +216,7 @@ SYM_FUNC_START(_init_fpu)
	movgr2fr.d	$f30, t1
	movgr2fr.d	$f31, t1

-	jirl	zero, ra, 0
+	jr	ra
SYM_FUNC_END(_init_fpu)

/*
@@ -225,11 +225,11 @@ SYM_FUNC_END(_init_fpu)
 * a2: fcsr
 */
SYM_FUNC_START(_save_fp_context)
	sc_save_fcc	a1 t1 t2
	sc_save_fcsr	a2 t1
	sc_save_fp	a0
	li.w	a0, 0			# success
-	jirl	zero, ra, 0
+	jr	ra
SYM_FUNC_END(_save_fp_context)

/*
@@ -238,14 +238,14 @@ SYM_FUNC_END(_save_fp_context)
 * a2: fcsr
 */
SYM_FUNC_START(_restore_fp_context)
	sc_restore_fp	a0
	sc_restore_fcc	a1 t1 t2
	sc_restore_fcsr	a2 t1
	li.w	a0, 0			# success
-	jirl	zero, ra, 0
+	jr	ra
SYM_FUNC_END(_restore_fp_context)

SYM_FUNC_START(fault)
	li.w	a0, -EFAULT		# failure
-	jirl	zero, ra, 0
+	jr	ra
SYM_FUNC_END(fault)
@@ -28,23 +28,23 @@ SYM_FUNC_START(__arch_cpu_idle)
	nop
	idle	0
	/* end of rollback region */
-1:	jirl	zero, ra, 0
+1:	jr	ra
SYM_FUNC_END(__arch_cpu_idle)

SYM_FUNC_START(handle_vint)
	BACKUP_T0T1
	SAVE_ALL
	la.abs	t1, __arch_cpu_idle
	LONG_L	t0, sp, PT_ERA
	/* 32 byte rollback region */
	ori	t0, t0, 0x1f
	xori	t0, t0, 0x1f
	bne	t0, t1, 1f
	LONG_S	t0, sp, PT_ERA
1:	move	a0, sp
	move	a1, sp
	la.abs	t0, do_vint
	jirl	ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_vint)

@@ -72,7 +72,7 @@ SYM_FUNC_END(except_vec_cex)
	build_prep_\prep
	move	a0, sp
	la.abs	t0, do_\handler
	jirl	ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(handle_\exception)
	.endm

@@ -91,5 +91,5 @@ SYM_FUNC_END(except_vec_cex)

SYM_FUNC_START(handle_sys)
	la.abs	t0, handle_syscall
-	jirl	zero, t0, 0
+	jr	t0
SYM_FUNC_END(handle_sys)
@@ -32,7 +32,7 @@ SYM_CODE_START(kernel_entry)			# kernel entry point
	/* We might not get launched at the address the kernel is linked to,
	   so we jump there. */
	la.abs	t0, 0f
-	jirl	zero, t0, 0
+	jr	t0
0:
	la	t0, __bss_start		# clear .bss
	st.d	zero, t0, 0
@@ -50,7 +50,7 @@ SYM_CODE_START(kernel_entry)			# kernel entry point
	/* KSave3 used for percpu base, initialized as 0 */
	csrwr	zero, PERCPU_BASE_KS
	/* GPR21 used for percpu base (runtime), initialized as 0 */
-	or	u0, zero, zero
+	move	u0, zero

	la	tp, init_thread_union
	/* Set the SP after an empty pt_regs. */
@@ -85,8 +85,8 @@ SYM_CODE_START(smpboot_entry)
	ld.d	sp, t0, CPU_BOOT_STACK
	ld.d	tp, t0, CPU_BOOT_TINFO

	la.abs	t0, 0f
-	jirl	zero, t0, 0
+	jr	t0
0:
	bl	start_secondary
SYM_CODE_END(smpboot_entry)
@@ -193,7 +193,7 @@ static int fpr_set(struct task_struct *target,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
-	const int fcc_end = fcc_start + sizeof(u64);
+	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
@@ -209,10 +209,12 @@ static int fpr_set(struct task_struct *target,
	if (err)
		return err;

-	if (count > 0)
-		err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.fcc,
-					  fcc_start, fcc_end);
+	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.fcc, fcc_start,
+				  fcc_start + sizeof(u64));
+	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.fcsr, fcsr_start,
+				  fcsr_start + sizeof(u32));

	return err;
}
@@ -13,7 +13,6 @@
#include <linux/console.h>

#include <acpi/reboot.h>
-#include <asm/compiler.h>
#include <asm/idle.h>
#include <asm/loongarch.h>
#include <asm/reboot.h>
@@ -126,7 +126,7 @@ static void __init parse_bios_table(const struct dmi_header *dm)
	char *dmi_data = (char *)dm;

	bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET);
-	b_info.bios_size = *(dmi_data + SMBIOS_BIOSSIZE_OFFSET);
+	b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;

	if (bios_extern & LOONGSON_EFI_ENABLE)
		set_bit(EFI_BOOT, &efi.flags);
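The corrected expression decodes the SMBIOS BIOS ROM Size field, which stores (size in 64 KiB units) - 1, so "(n + 1) << 6" yields a size in KiB. A stand-alone sketch with an assumed field value:

#include <stdio.h>

int main(void)
{
	unsigned char n = 127;			/* hypothetical SMBIOS field value */
	unsigned int size_kib = (n + 1) << 6;	/* 128 units * 64 KiB = 8192 KiB */

	printf("%u KiB (%u MiB)\n", size_kib, size_kib >> 10);
	return 0;
}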
@@ -278,116 +278,29 @@ void loongson3_cpu_die(unsigned int cpu)
	mb();
}

-/*
- * The target CPU should go to XKPRANGE (uncached area) and flush
- * ICache/DCache/VCache before the control CPU can safely disable its clock.
- */
-static void loongson3_play_dead(int *state_addr)
+void play_dead(void)
{
-	register int val;
-	register void *addr;
+	register uint64_t addr;
	register void (*init_fn)(void);

-	__asm__ __volatile__(
-		"   li.d %[addr], 0x8000000000000000\n"
-		"1: cacop 0x8, %[addr], 0           \n" /* flush ICache */
-		"   cacop 0x8, %[addr], 1           \n"
-		"   cacop 0x8, %[addr], 2           \n"
-		"   cacop 0x8, %[addr], 3           \n"
-		"   cacop 0x9, %[addr], 0           \n" /* flush DCache */
-		"   cacop 0x9, %[addr], 1           \n"
-		"   cacop 0x9, %[addr], 2           \n"
-		"   cacop 0x9, %[addr], 3           \n"
-		"   addi.w %[sets], %[sets], -1     \n"
-		"   addi.d %[addr], %[addr], 0x40   \n"
-		"   bnez %[sets], 1b                \n"
-		"   li.d %[addr], 0x8000000000000000\n"
-		"2: cacop 0xa, %[addr], 0           \n" /* flush VCache */
-		"   cacop 0xa, %[addr], 1           \n"
-		"   cacop 0xa, %[addr], 2           \n"
-		"   cacop 0xa, %[addr], 3           \n"
-		"   cacop 0xa, %[addr], 4           \n"
-		"   cacop 0xa, %[addr], 5           \n"
-		"   cacop 0xa, %[addr], 6           \n"
-		"   cacop 0xa, %[addr], 7           \n"
-		"   cacop 0xa, %[addr], 8           \n"
-		"   cacop 0xa, %[addr], 9           \n"
-		"   cacop 0xa, %[addr], 10          \n"
-		"   cacop 0xa, %[addr], 11          \n"
-		"   cacop 0xa, %[addr], 12          \n"
-		"   cacop 0xa, %[addr], 13          \n"
-		"   cacop 0xa, %[addr], 14          \n"
-		"   cacop 0xa, %[addr], 15          \n"
-		"   addi.w %[vsets], %[vsets], -1   \n"
-		"   addi.d %[addr], %[addr], 0x40   \n"
-		"   bnez %[vsets], 2b               \n"
-		"   li.w %[val], 0x7                \n" /* *state_addr = CPU_DEAD; */
-		"   st.w %[val], %[state_addr], 0   \n"
-		"   dbar 0                          \n"
-		"   cacop 0x11, %[state_addr], 0    \n" /* flush entry of *state_addr */
-		: [addr] "=&r" (addr), [val] "=&r" (val)
-		: [state_addr] "r" (state_addr),
-		  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
-		  [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
-
+	idle_task_exit();
	local_irq_enable();
-	change_csr_ecfg(ECFG0_IM, ECFGF_IPI);
+	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

-	__asm__ __volatile__(
-		"   idle      0                     \n"
-		"   li.w      $t0, 0x1020           \n"
-		"   iocsrrd.d %[init_fn], $t0       \n" /* Get init PC */
-		: [init_fn] "=&r" (addr)
-		: /* No Input */
-		: "a0");
-	init_fn = __va(addr);
+	__smp_mb();
+	do {
+		__asm__ __volatile__("idle 0\n\t");
+		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+	} while (addr == 0);
+
+	init_fn = (void *)TO_CACHE(addr);
+	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	unreachable();
}

-void play_dead(void)
-{
-	int *state_addr;
-	unsigned int cpu = smp_processor_id();
-	void (*play_dead_uncached)(int *s);
-
-	idle_task_exit();
-	play_dead_uncached = (void *)TO_UNCACHE(__pa((unsigned long)loongson3_play_dead));
-	state_addr = &per_cpu(cpu_state, cpu);
-	mb();
-	play_dead_uncached(state_addr);
-}
-
-static int loongson3_enable_clock(unsigned int cpu)
-{
-	uint64_t core_id = cpu_data[cpu].core;
-	uint64_t package_id = cpu_data[cpu].package;
-
-	LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
-
-	return 0;
-}
-
-static int loongson3_disable_clock(unsigned int cpu)
-{
-	uint64_t core_id = cpu_data[cpu].core;
-	uint64_t package_id = cpu_data[cpu].package;
-
-	LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
-
-	return 0;
-}
-
-static int register_loongson3_notifier(void)
-{
-	return cpuhp_setup_state_nocalls(CPUHP_LOONGARCH_SOC_PREPARE,
-					 "loongarch/loongson:prepare",
-					 loongson3_enable_clock,
-					 loongson3_disable_clock);
-}
-early_initcall(register_loongson3_notifier);
-
#endif

/*
@@ -24,8 +24,8 @@ SYM_FUNC_START(__switch_to)
	move	tp, a2
	cpu_restore_nonscratch a1

	li.w	t0, _THREAD_SIZE - 32
	PTR_ADD	t0, t0, tp
	set_saved_sp	t0, t1, t2

	ldptr.d	t1, a1, THREAD_CSRPRMD
@@ -32,7 +32,7 @@ SYM_FUNC_START(__clear_user)
1:	st.b	zero, a0, 0
	addi.d	a0, a0, 1
	addi.d	a1, a1, -1
-	bgt	a1, zero, 1b
+	bgtz	a1, 1b

2:	move	a0, a1
	jr	ra
@@ -35,7 +35,7 @@ SYM_FUNC_START(__copy_user)
	addi.d	a0, a0, 1
	addi.d	a1, a1, 1
	addi.d	a2, a2, -1
-	bgt	a2, zero, 1b
+	bgtz	a2, 1b

3:	move	a0, a2
	jr	ra
@@ -7,7 +7,6 @@
#include <linux/smp.h>
#include <linux/timex.h>

-#include <asm/compiler.h>
#include <asm/processor.h>

void __delay(unsigned long cycles)
@@ -10,75 +10,75 @@

	.align 5
SYM_FUNC_START(clear_page)
	lu12i.w	t0, 1 << (PAGE_SHIFT - 12)
	add.d	t0, t0, a0
1:
	st.d	zero, a0, 0
	st.d	zero, a0, 8
	st.d	zero, a0, 16
	st.d	zero, a0, 24
	st.d	zero, a0, 32
	st.d	zero, a0, 40
	st.d	zero, a0, 48
	st.d	zero, a0, 56
	addi.d	a0, a0, 128
	st.d	zero, a0, -64
	st.d	zero, a0, -56
	st.d	zero, a0, -48
	st.d	zero, a0, -40
	st.d	zero, a0, -32
	st.d	zero, a0, -24
	st.d	zero, a0, -16
	st.d	zero, a0, -8
	bne	t0, a0, 1b

-	jirl	$r0, ra, 0
+	jr	ra
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)

	.align 5
SYM_FUNC_START(copy_page)
	lu12i.w	t8, 1 << (PAGE_SHIFT - 12)
	add.d	t8, t8, a0
1:
	ld.d	t0, a1, 0
	ld.d	t1, a1, 8
	ld.d	t2, a1, 16
	ld.d	t3, a1, 24
	ld.d	t4, a1, 32
	ld.d	t5, a1, 40
	ld.d	t6, a1, 48
	ld.d	t7, a1, 56

	st.d	t0, a0, 0
	st.d	t1, a0, 8
	ld.d	t0, a1, 64
	ld.d	t1, a1, 72
	st.d	t2, a0, 16
	st.d	t3, a0, 24
	ld.d	t2, a1, 80
	ld.d	t3, a1, 88
	st.d	t4, a0, 32
	st.d	t5, a0, 40
	ld.d	t4, a1, 96
	ld.d	t5, a1, 104
	st.d	t6, a0, 48
	st.d	t7, a0, 56
	ld.d	t6, a1, 112
	ld.d	t7, a1, 120
	addi.d	a0, a0, 128
	addi.d	a1, a1, 128

	st.d	t0, a0, -64
	st.d	t1, a0, -56
	st.d	t2, a0, -48
	st.d	t3, a0, -40
	st.d	t4, a0, -32
	st.d	t5, a0, -24
	st.d	t6, a0, -16
	st.d	t7, a0, -8

	bne	t8, a0, 1b
-	jirl	$r0, ra, 0
+	jr	ra
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
@@ -18,7 +18,7 @@
	REG_S	a2, sp, PT_BVADDR
	li.w	a1, \write
	la.abs	t0, do_page_fault
	jirl	ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm
@@ -34,7 +34,7 @@ SYM_FUNC_START(handle_tlb_protect)
	csrrd	a2, LOONGARCH_CSR_BADV
	REG_S	a2, sp, PT_BVADDR
	la.abs	t0, do_page_fault
	jirl	ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

@@ -47,7 +47,7 @@ SYM_FUNC_START(handle_tlb_load)
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_load
+	bltz	t0, vmalloc_load
	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
@@ -80,7 +80,7 @@ vmalloc_done_load:
	 * see if we need to jump to huge tlb processing.
	 */
	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_load
+	bnez	t0, tlb_huge_update_load

	csrrd	t0, LOONGARCH_CSR_BADV
	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -100,12 +100,12 @@ smp_pgtable_change_load:

	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_load
+	beqz	ra, nopage_tlb_load

	ori	t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_load
+	beqz	t0, smp_pgtable_change_load
#else
	st.d	t0, t1, 0
#endif
@@ -139,23 +139,23 @@ tlb_huge_update_load:
#endif
	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_load
+	beqz	ra, nopage_tlb_load
	tlbsrch

	ori	t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_load
+	beqz	t0, tlb_huge_update_load
	ld.d	t0, t1, 0
#else
	st.d	t0, t1, 0
#endif
-	addu16i.d	t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d	ra, t1, 0
-	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
+	addi.d	ra, t1, 0
+	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
	tlbwr

-	csrxchg	$r0, t1, LOONGARCH_CSR_TLBIDX
+	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX

	/*
	 * A huge PTE describes an area the size of the
@@ -178,27 +178,27 @@ tlb_huge_update_load:
	addi.d	t0, ra, 0

	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_load:
	dbar	0
	csrrd	ra, EXCEPTION_KS2
	la.abs	t0, tlb_do_page_fault_0
-	jirl	$r0, t0, 0
+	jr	t0
SYM_FUNC_END(handle_tlb_load)

SYM_FUNC_START(handle_tlb_store)
@@ -210,7 +210,7 @@ SYM_FUNC_START(handle_tlb_store)
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_store
+	bltz	t0, vmalloc_store
	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
@@ -244,7 +244,7 @@ vmalloc_done_store:
	 * see if we need to jump to huge tlb processing.
	 */
	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_store
+	bnez	t0, tlb_huge_update_store

	csrrd	t0, LOONGARCH_CSR_BADV
	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -265,12 +265,12 @@ smp_pgtable_change_store:
	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-	bne	ra, $r0, nopage_tlb_store
+	bnez	ra, nopage_tlb_store

	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_store
+	beqz	t0, smp_pgtable_change_store
#else
	st.d	t0, t1, 0
#endif
@@ -306,24 +306,24 @@ tlb_huge_update_store:
	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-	bne	ra, $r0, nopage_tlb_store
+	bnez	ra, nopage_tlb_store

	tlbsrch
	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)

#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_store
+	beqz	t0, tlb_huge_update_store
	ld.d	t0, t1, 0
#else
	st.d	t0, t1, 0
#endif
-	addu16i.d	t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d	ra, t1, 0
-	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
+	addi.d	ra, t1, 0
+	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
	tlbwr

-	csrxchg	$r0, t1, LOONGARCH_CSR_TLBIDX
+	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX
	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
@@ -345,28 +345,28 @@ tlb_huge_update_store:
	addi.d	t0, ra, 0

	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_store:
	dbar	0
	csrrd	ra, EXCEPTION_KS2
	la.abs	t0, tlb_do_page_fault_1
-	jirl	$r0, t0, 0
+	jr	t0
SYM_FUNC_END(handle_tlb_store)

SYM_FUNC_START(handle_tlb_modify)
@@ -378,7 +378,7 @@ SYM_FUNC_START(handle_tlb_modify)
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_modify
+	bltz	t0, vmalloc_modify
	csrrd	t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
@@ -411,7 +411,7 @@ vmalloc_done_modify:
	 * see if we need to jump to huge tlb processing.
	 */
	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_modify
+	bnez	t0, tlb_huge_update_modify

	csrrd	t0, LOONGARCH_CSR_BADV
	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -431,12 +431,12 @@ smp_pgtable_change_modify:

	srli.d	ra, t0, _PAGE_WRITE_SHIFT
	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_modify
+	beqz	ra, nopage_tlb_modify

	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_modify
+	beqz	t0, smp_pgtable_change_modify
#else
	st.d	t0, t1, 0
#endif
@@ -454,7 +454,7 @@ leave_modify:
	ertn
#ifdef CONFIG_64BIT
vmalloc_modify:
	la.abs	t1, swapper_pg_dir
	b	vmalloc_done_modify
#endif

@@ -471,14 +471,14 @@ tlb_huge_update_modify:

	srli.d	ra, t0, _PAGE_WRITE_SHIFT
	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_modify
+	beqz	ra, nopage_tlb_modify

	tlbsrch
	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)

#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_modify
+	beqz	t0, tlb_huge_update_modify
	ld.d	t0, t1, 0
#else
	st.d	t0, t1, 0
@@ -504,28 +504,28 @@ tlb_huge_update_modify:
	addi.d	t0, ra, 0

	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
-	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	tlbwr

	/* Reset default page size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
-	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

nopage_tlb_modify:
	dbar	0
	csrrd	ra, EXCEPTION_KS2
	la.abs	t0, tlb_do_page_fault_1
-	jirl	$r0, t0, 0
+	jr	t0
SYM_FUNC_END(handle_tlb_modify)

SYM_FUNC_START(handle_tlb_refill)
@@ -282,6 +282,10 @@ config PPC
	# Please keep this list sorted alphabetically.
	#

+config PPC_LONG_DOUBLE_128
+	depends on PPC64
+	def_bool $(success,test "$(shell,echo __LONG_DOUBLE_128__ | $(CC) -E -P -)" = 1)
+
config PPC_BARRIER_NOSPEC
	bool
	default y
@@ -20,6 +20,7 @@ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom_init.o += -fno-stack-protector
CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_prom_init.o += -ffreestanding
+CFLAGS_prom_init.o += $(call cc-option, -ftrivial-auto-var-init=uninitialized)

ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
@@ -111,7 +111,7 @@ PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
	$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
-		$(build)=arch/riscv/kernel/compat_vdso $@)
+		$(build)=arch/riscv/kernel/compat_vdso compat_$@)

ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
@ -72,7 +72,6 @@ static inline u64 lower_bits(u64 val, unsigned int bits)

struct real_mode_header;
enum stack_type;
struct ghcb;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
@ -156,11 +155,7 @@ static __always_inline void sev_es_nmi_complete(void)
__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
bool set_ghcb_msr,
struct es_em_ctxt *ctxt,
u64 exit_code, u64 exit_info_1,
u64 exit_info_2);

static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
int rc;
@ -1520,6 +1520,7 @@ static void __init spectre_v2_select_mitigation(void)
* enable IBRS around firmware calls.
*/
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
boot_cpu_has(X86_FEATURE_IBPB) &&
(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
@ -219,9 +219,10 @@ static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt
return ES_VMM_ERROR;
}

enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
struct es_em_ctxt *ctxt, u64 exit_code,
u64 exit_info_1, u64 exit_info_2)
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
struct es_em_ctxt *ctxt,
u64 exit_code, u64 exit_info_1,
u64 exit_info_2)
{
/* Fill in protocol and format specifiers */
ghcb->protocol_version = ghcb_version;
@ -231,14 +232,7 @@ enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

/*
* Hyper-V unenlightened guests use a paravisor for communicating and
* GHCB pages are being allocated and set up by that paravisor. Linux
* should not change the GHCB page's physical address.
*/
if (set_ghcb_msr)
sev_es_wr_ghcb_msr(__pa(ghcb));

sev_es_wr_ghcb_msr(__pa(ghcb));
VMGEXIT();

return verify_exception_info(ghcb, ctxt);
@ -795,7 +789,7 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
*/
sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
ghcb_set_sw_scratch(ghcb, sw_scratch);
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_IOIO,
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
exit_info_1, exit_info_2);
if (ret != ES_OK)
return ret;
@ -837,8 +831,7 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)

ghcb_set_rax(ghcb, rax);

ret = sev_es_ghcb_hv_call(ghcb, true, ctxt,
SVM_EXIT_IOIO, exit_info_1, 0);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
if (ret != ES_OK)
return ret;

@ -894,7 +887,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
/* xgetbv will cause #GP - use reset value for xcr0 */
ghcb_set_xcr0(ghcb, 1);

ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_CPUID, 0, 0);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
if (ret != ES_OK)
return ret;

@ -919,7 +912,7 @@ static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
enum es_result ret;

ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
if (ret != ES_OK)
return ret;
@ -786,7 +786,7 @@ static int vmgexit_psc(struct snp_psc_desc *desc)
ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

/* This will advance the shared buffer data points to. */
ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

/*
* Page State Change VMGEXIT can pass error code through
@ -1212,8 +1212,7 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
ghcb_set_rdx(ghcb, regs->dx);
}

ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR,
exit_info_1, 0);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);

if ((ret == ES_OK) && (!exit_info_1)) {
regs->ax = ghcb->save.rax;
@ -1452,7 +1451,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,

ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2);
return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
}

/*
@ -1628,7 +1627,7 @@ static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,

/* Using a value of 0 for ExitInfo1 means RAX holds the value */
ghcb_set_rax(ghcb, val);
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
if (ret != ES_OK)
return ret;

@ -1658,7 +1657,7 @@ static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
struct es_em_ctxt *ctxt)
{
return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0);
return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
}

static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
@ -1667,7 +1666,7 @@ static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt

ghcb_set_rcx(ghcb, ctxt->regs->cx);

ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
if (ret != ES_OK)
return ret;

@ -1708,7 +1707,7 @@ static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
if (x86_platform.hyper.sev_es_hcall_prepare)
x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
if (ret != ES_OK)
return ret;

@ -2197,7 +2196,7 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
ghcb_set_rbx(ghcb, input->data_npages);
}

ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
if (ret)
goto e_put;
@ -138,6 +138,7 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
&r_apb2_rsb_clk.common,
&r_apb1_ir_clk.common,
&r_apb1_w1_clk.common,
&r_apb1_rtc_clk.common,
&ir_clk.common,
&w1_clk.common,
};
@ -103,9 +103,14 @@ static void dimm_setup_label(struct dimm_info *dimm, u16 handle)

dmi_memdev_name(handle, &bank, &device);

/* both strings must be non-zero */
if (bank && *bank && device && *device)
snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
/*
* Set to a NULL string when both bank and device are zero. In this case,
* the label assigned by default will be preserved.
*/
snprintf(dimm->label, sizeof(dimm->label), "%s%s%s",
(bank && *bank) ? bank : "",
(bank && *bank && device && *device) ? " " : "",
(device && *device) ? device : "");
}
static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
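The three-part format string above covers every combination of missing strings without extra branches: both parts present yields "bank device", one part yields just that part, and neither yields "" so the default label survives. A tiny standalone sketch of the same trick (function name and sample data are illustrative, not from the driver):

#include <stdio.h>

static void make_label(char *buf, size_t len, const char *bank, const char *device)
{
	/* empty-string pieces simply vanish from the output */
	snprintf(buf, len, "%s%s%s",
		 (bank && *bank) ? bank : "",
		 (bank && *bank && device && *device) ? " " : "",
		 (device && *device) ? device : "");
}

int main(void)
{
	char label[64];

	make_label(label, sizeof(label), "BANK 0", "DIMM A");
	puts(label);	/* "BANK 0 DIMM A" */
	make_label(label, sizeof(label), NULL, "DIMM A");
	puts(label);	/* "DIMM A" */
	make_label(label, sizeof(label), "", "");
	puts(label);	/* "" - caller's default label is kept */
	return 0;
}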
@ -514,6 +514,28 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
memset(p, 0, sizeof(*p));
}

static void enable_intr(struct synps_edac_priv *priv)
{
/* Enable UE/CE Interrupts */
if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
writel(DDR_UE_MASK | DDR_CE_MASK,
priv->baseaddr + ECC_CLR_OFST);
else
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_EN_OFST);

}

static void disable_intr(struct synps_edac_priv *priv)
{
/* Disable UE/CE Interrupts */
if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
writel(0x0, priv->baseaddr + ECC_CLR_OFST);
else
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
}

/**
* intr_handler - Interrupt Handler for ECC interrupts.
* @irq: IRQ number.
@ -555,6 +577,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
/* v3.0 of the controller does not have this register */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
else
enable_intr(priv);

return IRQ_HANDLED;
}

@ -837,25 +862,6 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
init_csrows(mci);
}

static void enable_intr(struct synps_edac_priv *priv)
{
/* Enable UE/CE Interrupts */
if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
writel(DDR_UE_MASK | DDR_CE_MASK,
priv->baseaddr + ECC_CLR_OFST);
else
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_EN_OFST);

}

static void disable_intr(struct synps_edac_priv *priv)
{
/* Disable UE/CE Interrupts */
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
}

static int setup_irq(struct mem_ctl_info *mci,
struct platform_device *pdev)
{
@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
@ -201,6 +201,8 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
@ -1282,10 +1282,10 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

/*
* Wa_22011802037 : gen12, Prior to doing a reset, ensure CS is
* Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is
* stopped, set ring stop bit and prefetch disable bit to halt CS
*/
if (GRAPHICS_VER(engine->i915) == 12)
if (IS_GRAPHICS_VER(engine->i915, 11, 12))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
_MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));

@ -1308,6 +1308,18 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
return -ENODEV;

ENGINE_TRACE(engine, "\n");
/*
* TODO: Find out why occasionally stopping the CS times out. Seen
* especially with gem_eio tests.
*
* Occasionally trying to stop the cs times out, but does not adversely
* affect functionality. The timeout is set as a config parameter that
* defaults to 100ms. In most cases the follow up operation is to wait
* for pending MI_FORCE_WAKES. The assumption is that this timeout is
* sufficient for any pending MI_FORCEWAKEs to complete. Once root
* caused, the caller must check and handle the return from this
* function.
*/
if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
ENGINE_TRACE(engine,
"timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
@ -1334,6 +1346,78 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}

static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
{
static const i915_reg_t _reg[I915_NUM_ENGINES] = {
[RCS0] = MSG_IDLE_CS,
[BCS0] = MSG_IDLE_BCS,
[VCS0] = MSG_IDLE_VCS0,
[VCS1] = MSG_IDLE_VCS1,
[VCS2] = MSG_IDLE_VCS2,
[VCS3] = MSG_IDLE_VCS3,
[VCS4] = MSG_IDLE_VCS4,
[VCS5] = MSG_IDLE_VCS5,
[VCS6] = MSG_IDLE_VCS6,
[VCS7] = MSG_IDLE_VCS7,
[VECS0] = MSG_IDLE_VECS0,
[VECS1] = MSG_IDLE_VECS1,
[VECS2] = MSG_IDLE_VECS2,
[VECS3] = MSG_IDLE_VECS3,
[CCS0] = MSG_IDLE_CS,
[CCS1] = MSG_IDLE_CS,
[CCS2] = MSG_IDLE_CS,
[CCS3] = MSG_IDLE_CS,
};
u32 val;

if (!_reg[engine->id].reg) {
drm_err(&engine->i915->drm,
"MSG IDLE undefined for engine id %u\n", engine->id);
return 0;
}

val = intel_uncore_read(engine->uncore, _reg[engine->id]);

/* bits[29:25] & bits[13:9] >> shift */
return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
}
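The single-line extraction works because MSG_IDLE keeps a per-domain valid mask in bits[29:25], exactly sixteen bits above the pending-status field in bits[13:9]; shifting the register right by 16 drops the mask onto the status bits, so val & (val >> 16) keeps only status bits whose mask bit is set. A standalone sketch of the same bit trick (the two constants mirror what MSG_IDLE_FW_MASK and MSG_IDLE_FW_SHIFT would have to be, and are assumptions here):

#include <stdint.h>

#define FW_MASK		0x3e00u	/* bits[13:9]: pending forcewake status */
#define FW_SHIFT	9

static uint32_t pending_fw(uint32_t val)
{
	/* bits[29:25] >> 16 lands on bits[13:9], gating the status field */
	return (val & (val >> 16) & FW_MASK) >> FW_SHIFT;
}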

static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
{
int ret;

/* Ensure GPM receives fw up/down after CS is stopped */
udelay(1);

/* Wait for forcewake request to complete in GPM */
ret = __intel_wait_for_register_fw(gt->uncore,
GEN9_PWRGT_DOMAIN_STATUS,
fw_mask, fw_mask, 5000, 0, NULL);

/* Ensure CS receives fw ack from GPM */
udelay(1);

if (ret)
GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
}

/*
* Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
* pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
* pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
* MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
* are concerned only with the gt reset here, we use a logical OR of pending
* forcewakeups from all reset domains and then wait for them to complete by
* querying PWRGT_DOMAIN_STATUS.
*/
void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
{
u32 fw_pending = __cs_pending_mi_force_wakes(engine);

if (fw_pending)
__gpm_wait_for_fw_complete(engine->gt, fw_pending);
}

static u32
read_subslice_reg(const struct intel_engine_cs *engine,
int slice, int subslice, i915_reg_t reg)
@ -2968,6 +2968,13 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
ring_set_paused(engine, 1);
intel_engine_stop_cs(engine);

/*
* Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
if (IS_GRAPHICS_VER(engine->i915, 11, 12))
intel_engine_wait_for_pending_mi_fw(engine);

engine->execlists.reset_ccid = active_ccid(engine);
}
@ -310,8 +310,8 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2(gt->i915))
flags |= GUC_WA_DUAL_QUEUE;

/* Wa_22011802037: graphics version 12 */
if (GRAPHICS_VER(gt->i915) == 12)
/* Wa_22011802037: graphics version 11/12 */
if (IS_GRAPHICS_VER(gt->i915, 11, 12))
flags |= GUC_WA_PRE_PARSER;

/* Wa_16011777198:dg2 */
@ -1578,87 +1578,18 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
lrc_update_regs(ce, engine, head);
}

static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
{
static const i915_reg_t _reg[I915_NUM_ENGINES] = {
[RCS0] = MSG_IDLE_CS,
[BCS0] = MSG_IDLE_BCS,
[VCS0] = MSG_IDLE_VCS0,
[VCS1] = MSG_IDLE_VCS1,
[VCS2] = MSG_IDLE_VCS2,
[VCS3] = MSG_IDLE_VCS3,
[VCS4] = MSG_IDLE_VCS4,
[VCS5] = MSG_IDLE_VCS5,
[VCS6] = MSG_IDLE_VCS6,
[VCS7] = MSG_IDLE_VCS7,
[VECS0] = MSG_IDLE_VECS0,
[VECS1] = MSG_IDLE_VECS1,
[VECS2] = MSG_IDLE_VECS2,
[VECS3] = MSG_IDLE_VECS3,
[CCS0] = MSG_IDLE_CS,
[CCS1] = MSG_IDLE_CS,
[CCS2] = MSG_IDLE_CS,
[CCS3] = MSG_IDLE_CS,
};
u32 val;

if (!_reg[engine->id].reg)
return 0;

val = intel_uncore_read(engine->uncore, _reg[engine->id]);

/* bits[29:25] & bits[13:9] >> shift */
return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
}

static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
{
int ret;

/* Ensure GPM receives fw up/down after CS is stopped */
udelay(1);

/* Wait for forcewake request to complete in GPM */
ret = __intel_wait_for_register_fw(gt->uncore,
GEN9_PWRGT_DOMAIN_STATUS,
fw_mask, fw_mask, 5000, 0, NULL);

/* Ensure CS receives fw ack from GPM */
udelay(1);

if (ret)
GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
}

/*
* Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
* pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
* pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
* MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
* are concerned only with the gt reset here, we use a logical OR of pending
* forcewakeups from all reset domains and then wait for them to complete by
* querying PWRGT_DOMAIN_STATUS.
*/
static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
{
u32 fw_pending;

if (GRAPHICS_VER(engine->i915) != 12)
if (!IS_GRAPHICS_VER(engine->i915, 11, 12))
return;

/*
* Wa_22011802037
* TODO: Occasionally trying to stop the cs times out, but does not
* adversely affect functionality. The timeout is set as a config
* parameter that defaults to 100ms. Assuming that this timeout is
* sufficient for any pending MI_FORCEWAKEs to complete, ignore the
* timeout returned here until it is root caused.
*/
intel_engine_stop_cs(engine);

fw_pending = __cs_pending_mi_force_wakes(engine);
if (fw_pending)
__gpm_wait_for_fw_complete(engine->gt, fw_pending);
/*
* Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
intel_engine_wait_for_pending_mi_fw(engine);
}

static void guc_reset_nop(struct intel_engine_cs *engine)
@ -680,7 +680,11 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
goto out_free_dma;

for (i = 0; i < npages; i += max) {
args.end = start + (max << PAGE_SHIFT);
if (args.start + (max << PAGE_SHIFT) > end)
args.end = end;
else
args.end = args.start + (max << PAGE_SHIFT);

ret = migrate_vma_setup(&args);
if (ret)
goto out_free_pfns;
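The removed line computed args.end from the loop-invariant start, so every chunk after the first spanned past its own window; the replacement clamps each chunk to the overall range end instead. The four-line if/else is equivalent to a one-line clamp, sketched here with min_t() (types assumed from the surrounding code):

	/* equivalent single-expression clamp for each migration chunk */
	args.end = min_t(unsigned long, end, args.start + (max << PAGE_SHIFT));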
@ -627,7 +627,7 @@ static const struct drm_connector_funcs simpledrm_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int
static enum drm_mode_status
simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
@ -162,7 +162,13 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,

raw_local_irq_enable();
ret = __intel_idle(dev, drv, index);
raw_local_irq_disable();

/*
* The lockdep hardirqs state may be changed to 'on' with timer
* tick interrupt followed by __do_softirq(). Use local_irq_disable()
* to keep the hardirqs state correct.
*/
local_irq_disable();

return ret;
}
@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
int ref_ok, struct funeth_txq *xdp_q)
{
struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
struct xdp_buff xdp;
u32 act;

@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
case XDP_TX:
if (unlikely(!ref_ok))
goto pass;
if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))

xdpf = xdp_convert_buff_to_frame(&xdp);
if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
goto xdp_error;
FUN_QSTAT_INC(q, xdp_tx);
q->xdp_flush |= FUN_XDP_FLUSH_TX;
@ -466,7 +466,7 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)

do {
fun_xdp_unmap(q, reclaim_idx);
page_frag_free(q->info[reclaim_idx].vaddr);
xdp_return_frame(q->info[reclaim_idx].xdpf);

trace_funeth_tx_free(q, reclaim_idx, 1, head);

@ -479,11 +479,11 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
return npkts;
}

bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
struct fun_eth_tx_req *req;
struct fun_dataop_gl *gle;
unsigned int idx;
unsigned int idx, len;
dma_addr_t dma;

if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
@ -494,7 +494,8 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
return false;
}

dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
len = xdpf->len;
dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
FUN_QSTAT_INC(q, tx_map_err);
return false;
@ -514,7 +515,7 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
gle = (struct fun_dataop_gl *)req->dataop.imm;
fun_dataop_gl_init(gle, 0, 0, len, dma);

q->info[idx].vaddr = data;
q->info[idx].xdpf = xdpf;

u64_stats_update_begin(&q->syncp);
q->stats.tx_bytes += len;
@ -545,12 +546,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n,
if (unlikely(q_idx >= fp->num_xdpqs))
return -ENXIO;

for (q = xdpqs[q_idx], i = 0; i < n; i++) {
const struct xdp_frame *xdpf = frames[i];

if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
for (q = xdpqs[q_idx], i = 0; i < n; i++)
if (!fun_xdp_tx(q, frames[i]))
break;
}

if (unlikely(flags & XDP_XMIT_FLUSH))
fun_txq_wr_db(q);

@ -577,7 +575,7 @@ static void fun_xdpq_purge(struct funeth_txq *q)
unsigned int idx = q->cons_cnt & q->mask;

fun_xdp_unmap(q, idx);
page_frag_free(q->info[idx].vaddr);
xdp_return_frame(q->info[idx].xdpf);
q->cons_cnt++;
}
}
@ -95,8 +95,8 @@ struct funeth_txq_stats { /* per Tx queue SW counters */

struct funeth_tx_info { /* per Tx descriptor state */
union {
struct sk_buff *skb; /* associated packet */
void *vaddr; /* start address for XDP */
struct sk_buff *skb; /* associated packet (sk_buff path) */
struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
};
};

@ -245,7 +245,7 @@ static inline int fun_irq_node(const struct fun_irq *p)
int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
int fun_txq_napi_poll(struct napi_struct *napi, int budget);
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
int fun_xdp_xmit_frames(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags);
@ -1925,11 +1925,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* non-zero req_queue_pairs says that user requested a new
* queue count via ethtool's set_channels, so use this
* value for queues distribution across traffic classes
* We need at least one queue pair for the interface
* to be usable as we see in else statement.
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
else
vsi->num_queue_pairs = 1;
}

/* Number of queues per enabled TC */
@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
rx_desc = ICE_RX_DESC(rx_ring, i);

if (!(rx_desc->wb.status_error0 &
cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
(cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;

rx_buf = &rx_ring->rx_buf[i];
@ -4656,6 +4656,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_set_safe_mode_caps(hw);
}

hw->ucast_shared = true;

err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
@ -6011,10 +6013,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);

err = ice_vsi_vlan_setup(vsi);
if (vsi->type != ICE_VSI_LB) {
err = ice_vsi_vlan_setup(vsi);

if (err)
return err;
if (err)
return err;
}
}
ice_vsi_cfg_dcb_rings(vsi);
@ -1309,39 +1309,6 @@ out_put_vf:
return ret;
}

/**
* ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
* @pf: PF used to reference the switch's rules
* @umac: unicast MAC to compare against existing switch rules
*
* Return true on the first/any match, else return false
*/
static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
{
struct ice_sw_recipe *mac_recipe_list =
&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
struct ice_fltr_mgmt_list_entry *list_itr;
struct list_head *rule_head;
struct mutex *rule_lock; /* protect MAC filter list access */

rule_head = &mac_recipe_list->filt_rules;
rule_lock = &mac_recipe_list->filt_rule_lock;

mutex_lock(rule_lock);
list_for_each_entry(list_itr, rule_head, list_entry) {
u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];

if (ether_addr_equal(existing_mac, umac)) {
mutex_unlock(rule_lock);
return true;
}
}

mutex_unlock(rule_lock);

return false;
}

/**
* ice_set_vf_mac
* @netdev: network interface device structure
@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (ret)
goto out_put_vf;

if (ice_unicast_mac_exists(pf, mac)) {
netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
mac, vf_id, mac);
ret = -EINVAL;
goto out_put_vf;
}

mutex_lock(&vf->cfg_lock);

/* VF is notified of its new MAC via the PF's response to the
@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)

protocol = vlan_get_protocol(skb);

if (eth_p_mpls(protocol))
if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
else
l4.hdr = skb_checksum_start(skb);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb);
l4.hdr = skb_transport_header(skb);
}

/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
@ -2948,7 +2948,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
struct virtchnl_vlan_filtering_caps *vfc,
struct virtchnl_vlan_filter_list_v2 *vfl)
{
u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
vfl->num_elements;

if (num_requested_filters > vfc->max_filters)
return false;
@ -28,6 +28,9 @@
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL

#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL
#define CN10K_MAX_BURST_SIZE 8453888ULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
@ -35,6 +38,9 @@
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)

#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)

struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);

static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
u32 *burst_mantissa)
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
u32 *burst_exp, u32 *burst_mantissa)
{
int max_burst, max_mantissa;
unsigned int tmp;

if (is_dev_otx2(nic->pdev)) {
max_burst = MAX_BURST_SIZE;
max_mantissa = MAX_BURST_MANTISSA;
} else {
max_burst = CN10K_MAX_BURST_SIZE;
max_mantissa = CN10K_MAX_BURST_MANTISSA;
}

/* Burst is calculated as
* ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
* Max supported burst size is 130,816 bytes.
*/
burst = min_t(u32, burst, MAX_BURST_SIZE);
burst = min_t(u32, burst, max_burst);
if (burst) {
*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
tmp = burst - rounddown_pow_of_two(burst);
if (burst < MAX_BURST_MANTISSA)
if (burst < max_mantissa)
*burst_mantissa = tmp * 2;
else
*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
} else {
*burst_exp = MAX_BURST_EXPONENT;
*burst_mantissa = MAX_BURST_MANTISSA;
*burst_mantissa = max_mantissa;
}
}
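The hardware formula quoted in the comment lets the two device limits be sanity-checked: burst = ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256. With a 15-bit mantissa instead of 8 bits, CN10K's ceiling at the same maximum exponent rises from 130,816 to exactly 8,453,888 bytes, which is why the constants above differ. A small self-contained check of the decode side (the 0xF maximum exponent is inferred from those sizes, not shown in this hunk):

#include <assert.h>

/* decode side of the burst encoding: ((256 + mantissa) << (1 + exp)) / 256 */
static unsigned long long burst_decode(unsigned int exp, unsigned int mantissa)
{
	return ((256ULL + mantissa) << (1 + exp)) / 256;
}

int main(void)
{
	assert(burst_decode(0xf, 0xff) == 130816ULL);		/* OTx2 max */
	assert(burst_decode(0xf, 0x7fff) == 8453888ULL);	/* CN10K max */
	return 0;
}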

static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
u32 *mantissa, u32 *div_exp)
{
unsigned int tmp;
u64 tmp;

/* Rate calculation by hardware
*
@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
}
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
u64 maxrate, u32 burst)
{
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
u64 regval = 0;

/* Get exponent and mantissa values from the desired rate */
otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

if (is_dev_otx2(nic->pdev)) {
regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
FIELD_PREP(TLX_RATE_EXPONENT, exp) |
FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
} else {
regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
FIELD_PREP(TLX_RATE_EXPONENT, exp) |
FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
}

return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
u32 burst, u64 maxrate)
{
struct otx2_hw *hw = &nic->hw;
struct nix_txschq_config *req;
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
int txschq, err;

/* All SQs share the same TL4, so pick the first scheduler */
txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

/* Get exponent and mantissa values from the desired rate */
otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
if (!req) {
@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
req->lvl = NIX_TXSCH_LVL_TL4;
req->num_regs = 1;
req->reg[0] = NIX_AF_TL4X_PIR(txschq);
req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
FIELD_PREP(TLX_RATE_EXPONENT, exp) |
FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

err = otx2_sync_mbox_msg(&nic->mbox);
mutex_unlock(&nic->mbox.lock);
@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action *actions = &cls->rule->action;
struct flow_action_entry *entry;
u32 rate;
u64 rate;
int err;

err = otx2_tc_validate_flow(nic, actions, extack);
@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
}
/* Convert bytes per second to Mbps */
rate = entry->police.rate_bytes_ps * 8;
rate = max_t(u32, rate / 1000000, 1);
rate = max_t(u64, rate / 1000000, 1);
err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
if (err)
return err;
@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,

flow_spec->dport = match.key->dst;
flow_mask->dport = match.mask->dst;
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_DPORT_UDP);
else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_DPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_DPORT_SCTP);

if (flow_mask->dport) {
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_DPORT_UDP);
else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_DPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}

flow_spec->sport = match.key->src;
flow_mask->sport = match.mask->src;
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_SPORT_UDP);
else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_SPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_SPORT_SCTP);

if (flow_mask->sport) {
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_SPORT_UDP);
else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_SPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_SPORT_SCTP);
}
}

return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
}

/* If the chain is ended by an load/store pair then this
* could serve as the new head of the the next chain.
* could serve as the new head of the next chain.
*/
if (curr_pair_is_memcpy(meta1, meta2)) {
head_ld_meta = meta1;
@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)

tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) {
/* This code invokes normal driver TX code which is always
* protected from softirqs when called from generic TX code,
* which in turn disables preemption. Look at __dev_queue_xmit
* which uses rcu_read_lock_bh disabling preemption for RCU
* plus disabling softirqs. We do not need RCU reader
* protection here.
*
* Although it is theoretically safe for current PTP TX/RX code
* running without disabling softirqs, there are three good
* reasons for doing so:
*
* 1) The code invoked is mainly implemented for non-PTP
* packets and it is always executed with softirqs
* disabled.
* 2) This being a single PTP packet, better to not
* interrupt its processing by softirqs which can lead
* to high latencies.
* 3) netdev_xmit_more checks preemption is disabled and
* triggers a BUG_ON if not.
*/
local_bh_disable();
efx_enqueue_skb(tx_queue, skb);
local_bh_enable();
} else {
WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
dev_kfree_skb_any(skb);
@ -688,18 +688,19 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)

ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
return ret;
goto err_remove_config_dt;

ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret) {
stmmac_remove_config_dt(pdev, plat_dat);
if (ret)
goto err_drv_probe;
}

return 0;

err_drv_probe:
mediatek_dwmac_clks_config(priv_plat, false);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);

return ret;
}
@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req {

/* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
* QMI response, but contains other information as well. Currently we
* simply wait for the the INIT_DRIVER transaction to complete and
* simply wait for the INIT_DRIVER transaction to complete and
* ignore any other data that might be returned.
*/
struct ipa_init_modem_driver_rsp {
@ -243,6 +243,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

static bool send_sci(const struct macsec_secy *secy)
{
@ -1697,7 +1698,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
return false;

if (attrs[MACSEC_SA_ATTR_PN] &&
*(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;

if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@ -1753,7 +1754,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
}

pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
if (tb_sa[MACSEC_SA_ATTR_PN] &&
nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
rtnl_unlock();
@ -1769,7 +1771,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
MACSEC_SA_ATTR_SALT);
MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@ -1842,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
return 0;

cleanup:
kfree(rx_sa);
macsec_rxsa_put(rx_sa);
rtnl_unlock();
return err;
}
@ -1939,7 +1941,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;

if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;

if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@ -2011,7 +2013,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
MACSEC_SA_ATTR_SALT);
MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@ -2085,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)

cleanup:
secy->operational = was_operational;
kfree(tx_sa);
macsec_txsa_put(tx_sa);
rtnl_unlock();
return err;
}
@ -2293,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;

if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;

if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@ -3745,9 +3747,6 @@ static int macsec_changelink_common(struct net_device *dev,
secy->operational = tx_sa && tx_sa->active;
}

if (data[IFLA_MACSEC_WINDOW])
secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

if (data[IFLA_MACSEC_ENCRYPT])
tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

@ -3793,6 +3792,16 @@ static int macsec_changelink_common(struct net_device *dev,
}
}

if (data[IFLA_MACSEC_WINDOW]) {
secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
* for XPN cipher suites */
if (secy->xpn &&
secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
return -EINVAL;
}

return 0;
}

@ -3822,7 +3831,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],

ret = macsec_changelink_common(dev, data);
if (ret)
return ret;
goto cleanup;

/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@ -896,7 +896,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,

*/
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
if (ret < 0)
return false;
return ret;

if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
int speed_value;
@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
int can_low_power = 1;
if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
can_low_power = 0;
of_node_put(np);
if (can_low_power) {
/* Enable automatic low-power */
sungem_phy_write(phy, 0x1c, 0x9002);
@ -242,9 +242,15 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;

/* Work struct for refilling if we run low on memory. */
/* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;

/* Is delayed refill enabled? */
bool refill_enabled;

/* The lock to synchronize the access to refill_enabled */
spinlock_t refill_lock;

/* Work struct for config space updates */
struct work_struct config_work;

@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
spin_lock_bh(&vi->refill_lock);
vi->refill_enabled = true;
spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
spin_lock_bh(&vi->refill_lock);
vi->refill_enabled = false;
spin_unlock_bh(&vi->refill_lock);
}
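Together these two helpers close the race between a NAPI poll and device close: the poller may only schedule the refill worker while refill_enabled is true, and the close path clears the flag under the same lock before cancelling the work. The lock flavors differ on purpose, since the poller already runs in softirq context. A condensed sketch of the two sides (helper names here are illustrative, not from the patch):

/* softirq (NAPI) side: plain spin_lock suffices, BHs are already off */
static void maybe_schedule_refill(struct virtnet_info *vi)
{
	spin_lock(&vi->refill_lock);
	if (vi->refill_enabled)
		schedule_delayed_work(&vi->refill, 0);
	spin_unlock(&vi->refill_lock);
}

/* process-context close path: stop new work first, then flush what's queued */
static void shutdown_refill(struct virtnet_info *vi)
{
	disable_delayed_refill(vi);
	cancel_delayed_work_sync(&vi->refill);
}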

static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}

if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
spin_lock(&vi->refill_lock);
if (vi->refill_enabled)
schedule_delayed_work(&vi->refill, 0);
spin_unlock(&vi->refill_lock);
}
}

u64_stats_update_begin(&rq->stats.syncp);
@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;

enable_delayed_refill(vi);

for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;

/* Make sure NAPI doesn't schedule refill work */
disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);

@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)

virtio_device_ready(vdev);

enable_delayed_refill(vi);

if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev);
if (err)
@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;

INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);

/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@ -3515,6 +3515,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP
depends on !S390
depends on COMMON_CLK
select NET_DEVLINK
select CRC16
help
This driver adds support for an OpenCompute time card.
@ -3565,7 +3565,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (!atomic_read(&queue->set_pci_flags_count)) {
/*
* there's no outstanding PCI any more, so we
* have to request a PCI to be sure the the PCI
* have to request a PCI to be sure the PCI
* will wake at some time in the future then we
* can flush packed buffers that might still be
* hanging around, which can happen if no
@ -11386,6 +11386,7 @@ scsih_shutdown(struct pci_dev *pdev)
_scsih_ir_shutdown(ioc);
_scsih_nvme_shutdown(ioc);
mpt3sas_base_mask_interrupts(ioc);
mpt3sas_base_stop_watchdog(ioc);
ioc->shost_recovery = 1;
mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
@ -450,7 +450,7 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)

goto out_put_request;

ret = 0;
if (hdr->iovec_count) {
if (hdr->iovec_count && hdr->dxfer_len) {
struct iov_iter i;
struct iovec *iov = NULL;
@ -2953,37 +2953,59 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
int err = 0;
unsigned long time_left;
unsigned long time_left = msecs_to_jiffies(max_timeout);
unsigned long flags;
bool pending;
int err;

retry:
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
time_left);

spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
/*
* The completion handler called complete() and the caller of
* this function still owns the @lrbp tag so the code below does
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);

if (!time_left) {
} else {
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag))
if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
* in case of an error, after clearing the doorbell,
* we also need to clear the outstanding_request
* field in hba
*/
spin_lock_irqsave(&hba->outstanding_lock, flags);
__clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
/*
* Since clearing the command succeeded we also need to
* clear the task tag bit from the outstanding_reqs
* variable.
*/
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
if (pending) {
hba->dev_cmd.complete = NULL;
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
}
spin_unlock_irqrestore(&hba->outstanding_lock, flags);

if (!pending) {
/*
* The completion handler ran while we tried to
* clear the command.
*/
time_left = 1;
goto retry;
}
} else {
dev_err(hba->dev, "%s: failed to clear tag %d\n",
__func__, lrbp->task_tag);
}
}
return err;
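The retry dance above has three terminal outcomes, which is easier to see flattened out. A schematic of the decision tree as the new code implements it (comment-only summary, not driver code):

	/*
	 * wait_for_completion_timeout():
	 *   completed -> read OCS under the host lock, return its status
	 *   timed out -> try to clear the command in the controller:
	 *     clear failed              -> return -ETIMEDOUT
	 *     cleared, tag still set    -> drop the tag, return -EAGAIN
	 *     cleared, tag already gone -> the completion handler won the
	 *                                  race; treat it as completed
	 *                                  (time_left = 1, goto retry)
	 */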
@ -108,9 +108,20 @@ out:
return ret;
}

static bool phandle_exists(const struct device_node *np,
const char *phandle_name, int index)
{
struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);

if (parse_np)
of_node_put(parse_np);

return parse_np != NULL;
}

#define MAX_PROP_SIZE 32
static int ufshcd_populate_vreg(struct device *dev, const char *name,
struct ufs_vreg **out_vreg)
struct ufs_vreg **out_vreg)
{
char prop_name[MAX_PROP_SIZE];
struct ufs_vreg *vreg = NULL;
@ -122,7 +133,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
}

snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
if (!of_parse_phandle(np, prop_name, 0)) {
if (!phandle_exists(np, prop_name, 0)) {
dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
__func__, prop_name);
goto out;
@ -1125,9 +1125,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
}
#endif

#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
extern int devmem_is_allowed(unsigned long pfn);
#endif

#endif /* __KERNEL__ */
@ -130,7 +130,6 @@ enum cpuhp_state {
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_LOONGARCH_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
@ -405,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
{
const struct inet6_dev *idev = __in6_dev_get(dev);

if (unlikely(!idev))
return true;

return !!idev->cnf.ignore_routes_with_linkdown;
}
@ -847,6 +847,7 @@ enum {
};

void l2cap_chan_hold(struct l2cap_chan *c);
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
void l2cap_chan_put(struct l2cap_chan *c);

static inline void l2cap_chan_lock(struct l2cap_chan *chan)
@ -321,7 +321,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);

#define TCP_PINGPONG_THRESH 3
#define TCP_PINGPONG_THRESH 1

static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
@ -338,14 +338,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}

static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);

if (icsk->icsk_ack.pingpong < U8_MAX)
icsk->icsk_ack.pingpong++;
}

static inline bool inet_csk_has_ulp(struct sock *sk)
{
return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
@ -2843,18 +2843,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have per netns sysctl_wmem ? */
if (proto->sysctl_wmem_offset)
return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));

return *proto->sysctl_wmem;
return READ_ONCE(*proto->sysctl_wmem);
}

static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have per netns sysctl_rmem ? */
if (proto->sysctl_rmem_offset)
return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));

return *proto->sysctl_rmem;
return READ_ONCE(*proto->sysctl_rmem);
}
|
||||
|
||||
/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
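Both getters read sysctl knobs that another context may be rewriting concurrently; the READ_ONCE() annotations pair with WRITE_ONCE() on the store side so the compiler can neither tear nor refetch the load, and so KCSAN treats the race as intentional. The general pattern, as a self-contained sketch (names are illustrative):

#include <linux/compiler.h>

static int demo_sysctl_wmem;	/* stand-in for a netns sysctl */

/* sysctl handler side: publish the new value with a single store */
static void demo_set_wmem(int val)
{
	WRITE_ONCE(demo_sysctl_wmem, val);
}

/* fast path side: one non-torn load that cannot be refetched */
static int demo_get_wmem(void)
{
	return READ_ONCE(demo_sysctl_wmem);
}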
@@ -1419,7 +1419,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
 
 static inline int tcp_win_from_space(const struct sock *sk, int space)
 {
-	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
 
 	return tcp_adv_win_scale <= 0 ?
 		(space>>(-tcp_adv_win_scale)) :
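For intuition, the scale picks what fraction of the receive space is counted as usable window; a worked example, under the assumption that the positive branch (which falls outside the visible hunk) has the usual space - (space >> scale) form:

#include <stdio.h>

static int win_from_space(int space, int scale)
{
	return scale <= 0 ? space >> -scale	/* 2^scale fraction */
			  : space - (space >> scale);
}

int main(void)
{
	printf("%d\n", win_from_space(65536, 1));	/* 32768: half for data */
	printf("%d\n", win_from_space(65536, 2));	/* 49152: 3/4 for data */
	printf("%d\n", win_from_space(65536, -2));	/* 16384: one quarter */
	return 0;
}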
@@ -192,6 +192,7 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE 1024
 
+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
 	short l_type;
 	short l_whence;
@@ -216,5 +217,6 @@ struct flock64 {
 	__ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
 
 #endif /* _ASM_GENERIC_FCNTL_H */
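The new guard turns the generic struct flock into a weak default: an architecture whose layout differs can define HAVE_ARCH_STRUCT_FLOCK and supply its own definition before pulling in the generic header. A hedged sketch of how an arch header would use it (the path and field layout are illustrative):

/* hypothetical arch/foo/include/uapi/asm/fcntl.h */
#define HAVE_ARCH_STRUCT_FLOCK

struct flock {
	short	l_type;
	short	l_whence;
	long	l_start;
	long	l_len;
	long	l_sysid;	/* hypothetical arch-specific field */
	int	l_pid;
};

#include <asm-generic/fcntl.h>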
@@ -7,12 +7,11 @@ CONFIG_DEBUG_SLAB=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_ON=y
-CONFIG_KMEMCHECK=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
 CONFIG_GCOV_KERNEL=y
 CONFIG_LOCKDEP=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_SCHEDSTATS=y
-CONFIG_VMLINUX_VALIDATION=y
+CONFIG_NOINSTR_VALIDATION=y
 CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
@@ -335,8 +335,6 @@ struct rwsem_waiter {
 	struct task_struct *task;
 	enum rwsem_waiter_type type;
 	unsigned long timeout;
-
-	/* Writer only, not initialized in reader */
 	bool handoff_set;
 };
 #define rwsem_first_waiter(sem) \
@@ -459,10 +457,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 		 * to give up the lock), request a HANDOFF to
 		 * force the issue.
 		 */
-		if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
-		    time_after(jiffies, waiter->timeout)) {
-			adjustment -= RWSEM_FLAG_HANDOFF;
-			lockevent_inc(rwsem_rlock_handoff);
+		if (time_after(jiffies, waiter->timeout)) {
+			if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+				adjustment -= RWSEM_FLAG_HANDOFF;
+				lockevent_inc(rwsem_rlock_handoff);
+			}
+			waiter->handoff_set = true;
 		}
 
 		atomic_long_add(-adjustment, &sem->count);
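The reshuffle above changes one observable thing: a reader that has waited past its timeout now records waiter->handoff_set even when another waiter already owns the HANDOFF bit, rather than only in the branch that sets the bit. Reduced to plain control flow (a sketch of the semantics, not the kernel code):

#include <stdbool.h>

struct demo_waiter { bool handoff_set; };

static void old_timeout_path(bool bit_already_set, bool timed_out,
			     struct demo_waiter *w, long *adjustment)
{
	if (!bit_already_set && timed_out)
		*adjustment -= 1;	/* stand-in for RWSEM_FLAG_HANDOFF */
	/* handoff_set was a writer-only field and stayed untouched */
	(void)w;
}

static void new_timeout_path(bool bit_already_set, bool timed_out,
			     struct demo_waiter *w, long *adjustment)
{
	if (timed_out) {
		if (!bit_already_set)
			*adjustment -= 1;
		w->handoff_set = true;	/* recorded on every timeout */
	}
}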
@@ -599,7 +599,7 @@ rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 					struct rwsem_waiter *waiter)
 {
-	bool first = rwsem_first_waiter(sem) == waiter;
+	struct rwsem_waiter *first = rwsem_first_waiter(sem);
 	long count, new;
 
 	lockdep_assert_held(&sem->wait_lock);
@@ -609,11 +609,20 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
 		if (has_handoff) {
-			if (!first)
+			/*
+			 * Honor handoff bit and yield only when the first
+			 * waiter is the one that set it. Otherwise, we
+			 * still try to acquire the rwsem.
+			 */
+			if (first->handoff_set && (waiter != first))
 				return false;
 
-			/* First waiter inherits a previously set handoff bit */
-			waiter->handoff_set = true;
+			/*
+			 * First waiter can inherit a previously set handoff
+			 * bit and spin on rwsem if lock acquisition fails.
+			 */
+			if (waiter == first)
+				waiter->handoff_set = true;
 		}
 
 		new = count;
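The rewritten check narrows when a writer must back off: only when the handoff bit is set, the first waiter is the one that armed it, and we are not that waiter. Condensed into a predicate (a sketch of the policy, not the kernel code):

#include <stdbool.h>

struct demo_waiter { bool handoff_set; };

/* Returns true when this waiter may still try to take the lock. */
static bool may_attempt_lock(bool has_handoff,
			     const struct demo_waiter *first,
			     const struct demo_waiter *me)
{
	if (has_handoff && first->handoff_set && me != first)
		return false;	/* yield to the designated first waiter */
	return true;
}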
@@ -1027,6 +1036,7 @@ queue:
 	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+	waiter.handoff_set = false;
 
 	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list)) {
Some files were not shown because too many files have changed in this diff.