Merge branch 'for-next/rng' into for-next/core

* for-next/rng: (2 commits)
  arm64: Use v8.5-RNG entropy for KASLR seed
  ...

commit bc20606594
Documentation/arm64/cpu-feature-registers.rst

@@ -117,6 +117,8 @@ infrastructure:
     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
     +------------------------------+---------+---------+
     | RNDR                         | [63-60] |    y    |
     +------------------------------+---------+---------+
     | TS                           | [55-52] |    y    |
     +------------------------------+---------+---------+
     | FHM                          | [51-48] |    y    |
     +------------------------------+---------+---------+
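Because the RNDR field is marked visible here, EL0 code can read it through the kernel's MRS emulation of the ID registers. A minimal, hypothetical userspace probe (mine, not part of the patch; assumes a toolchain that knows the register name, otherwise the encoded form S3_0_C0_C6_0 can be used) might look like:

/* Hypothetical EL0 probe of ID_AA64ISAR0_EL1 via the kernel's MRS emulation. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t isar0;

        asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));

        if ((isar0 >> 60) & 0xf)        /* RNDR field, bits [63:60] */
                puts("RNDR/RNDRRS implemented");
        else
                puts("no architected RNG");
        return 0;
}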
Documentation/arm64/elf_hwcaps.rst

@@ -232,6 +232,10 @@ HWCAP2_DGH

    Functionality implied by ID_AA64ISAR1_EL1.DGH == 0b0001.

HWCAP2_RNG

    Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.

4. Unused AT_HWCAP bits
-----------------------
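For context, a hedged userspace sketch (hypothetical, not from the patch) of consuming the new hwcap: check AT_HWCAP2 for HWCAP2_RNG, then read RNDR and test the condition flags the same way __arm64_rndr() does in the kernel code further down.

#include <stdbool.h>
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_RNG
#define HWCAP2_RNG      (1 << 16)       /* value added to the uapi header below */
#endif

/* RNDR is S3_3_C2_C4_0; success leaves PSTATE.Z clear, failure sets it. */
static bool read_rndr(unsigned long *v)
{
        bool ok;

        asm volatile("mrs %0, S3_3_C2_C4_0\n"
                     "cset %w1, ne\n"
                     : "=r" (*v), "=r" (ok)
                     :
                     : "cc");
        return ok;
}

int main(void)
{
        unsigned long val;

        if (!(getauxval(AT_HWCAP2) & HWCAP2_RNG)) {
                puts("HWCAP2_RNG not set, use getrandom() instead");
                return 1;
        }

        if (read_rndr(&val))
                printf("RNDR: %#lx\n", val);
        return 0;
}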
arch/arm64/Kconfig

@@ -1528,6 +1528,14 @@ config ARM64_E0PD

          This option enables E0PD for TTBR1 where available.

config ARCH_RANDOM
        bool "Enable support for random number generation"
        default y
        help
          Random number generation (part of the ARMv8.5 Extensions)
          provides a high bandwidth, cryptographically secure
          hardware random number generator.

endmenu

config ARM64_SVE
arch/arm64/include/asm/archrandom.h (new file, 75 lines)

@@ -0,0 +1,75 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#ifdef CONFIG_ARCH_RANDOM

#include <linux/random.h>
#include <asm/cpufeature.h>

static inline bool __arm64_rndr(unsigned long *v)
{
        bool ok;

        /*
         * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
         * and set PSTATE.NZCV to 0b0100 otherwise.
         */
        asm volatile(
                __mrs_s("%0", SYS_RNDR_EL0) "\n"
        "       cset %w1, ne\n"
        : "=r" (*v), "=r" (ok)
        :
        : "cc");

        return ok;
}

static inline bool __must_check arch_get_random_long(unsigned long *v)
{
        return false;
}

static inline bool __must_check arch_get_random_int(unsigned int *v)
{
        return false;
}

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
        /*
         * Only support the generic interface after we have detected
         * the system wide capability, avoiding complexity with the
         * cpufeature code and with potential scheduling between CPUs
         * with and without the feature.
         */
        if (!cpus_have_const_cap(ARM64_HAS_RNG))
                return false;

        return __arm64_rndr(v);
}

static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
        unsigned long val;
        bool ok = arch_get_random_seed_long(&val);

        *v = val;
        return ok;
}

static inline bool __init __early_cpu_has_rndr(void)
{
        /* Open code as we run prior to the first call to cpufeature. */
        unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
        return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
}

#else

static inline bool __arm64_rndr(unsigned long *v) { return false; }
static inline bool __init __early_cpu_has_rndr(void) { return false; }

#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
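As a rough illustration of how these hooks behave (a sketch, not code from the commit; fill_seed_words is a made-up helper): arch_get_random_seed_long() only returns data once the system-wide ARM64_HAS_RNG capability is established, so a caller must always be prepared for it to fail and fall back to other entropy sources.

/* Hypothetical caller: fill a buffer from the architectural RNG, or give up. */
static bool fill_seed_words(unsigned long *buf, int nwords)
{
        int i;

        for (i = 0; i < nwords; i++) {
                if (!arch_get_random_seed_long(&buf[i]))
                        return false;   /* capability missing or RNDR reported failure */
        }

        return true;
}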
arch/arm64/include/asm/cpucaps.h

@@ -57,7 +57,8 @@
#define ARM64_WORKAROUND_1542419                47
#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE    48
#define ARM64_HAS_E0PD                          49
#define ARM64_HAS_RNG                           50

-#define ARM64_NCAPS                             50
+#define ARM64_NCAPS                             51

#endif /* __ASM_CPUCAPS_H */
arch/arm64/include/asm/hwcap.h

@@ -91,8 +91,9 @@
#define KERNEL_HWCAP_SVEF64MM           __khwcap2_feature(SVEF64MM)
#define KERNEL_HWCAP_SVEBF16            __khwcap2_feature(SVEBF16)
#define KERNEL_HWCAP_I8MM               __khwcap2_feature(I8MM)
#define KERNEL_HWCAP_BF16               __khwcap2_feature(BF16)
#define KERNEL_HWCAP_DGH                __khwcap2_feature(DGH)
#define KERNEL_HWCAP_RNG                __khwcap2_feature(RNG)

/*
 * This yields a mask that user programs can use to figure out what
arch/arm64/include/asm/sysreg.h

@@ -366,6 +366,9 @@
#define SYS_CTR_EL0                     sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0                   sys_reg(3, 3, 0, 0, 7)

#define SYS_RNDR_EL0                    sys_reg(3, 3, 2, 4, 0)
#define SYS_RNDRRS_EL0                  sys_reg(3, 3, 2, 4, 1)

#define SYS_PMCR_EL0                    sys_reg(3, 3, 9, 12, 0)
#define SYS_PMCNTENSET_EL0              sys_reg(3, 3, 9, 12, 1)
#define SYS_PMCNTENCLR_EL0              sys_reg(3, 3, 9, 12, 2)

@@ -552,6 +555,7 @@
#define MAIR_ATTRIDX(attr, idx)         ((attr) << ((idx) * 8))

/* id_aa64isar0 */
#define ID_AA64ISAR0_RNDR_SHIFT         60
#define ID_AA64ISAR0_TS_SHIFT           52
#define ID_AA64ISAR0_FHM_SHIFT          48
#define ID_AA64ISAR0_DP_SHIFT           44
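A small gloss (my reading, not text from the patch): the five sys_reg() arguments are the Op0/Op1/CRn/CRm/Op2 fields of an MRS/MSR operand, so sys_reg(3, 3, 2, 4, 0) is the register spelled S3_3_C2_C4_0 in generic assembler syntax, usable even when the toolchain does not know the RNDR name. A throwaway sketch of that spelling:

/* Print the generic "S<op0>_<op1>_C<CRn>_C<CRm>_<op2>" system-register name. */
#include <stdio.h>

static void print_sysreg_name(unsigned op0, unsigned op1, unsigned crn,
                              unsigned crm, unsigned op2)
{
        printf("S%u_%u_C%u_C%u_%u\n", op0, op1, crn, crm, op2);
}

int main(void)
{
        print_sysreg_name(3, 3, 2, 4, 0);       /* SYS_RNDR_EL0   -> S3_3_C2_C4_0 */
        print_sysreg_name(3, 3, 2, 4, 1);       /* SYS_RNDRRS_EL0 -> S3_3_C2_C4_1 */
        return 0;
}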
arch/arm64/include/uapi/asm/hwcap.h

@@ -72,5 +72,6 @@
#define HWCAP2_I8MM             (1 << 13)
#define HWCAP2_BF16             (1 << 14)
#define HWCAP2_DGH              (1 << 15)
#define HWCAP2_RNG              (1 << 16)

#endif /* _UAPI__ASM_HWCAP_H */
arch/arm64/kernel/cpufeature.c

@@ -121,6 +121,7 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
 * sync with the documentation of the CPU feature register ABI.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
@@ -1658,6 +1659,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .min_field_value = 1,
                .cpu_enable = cpu_enable_e0pd,
        },
#endif
#ifdef CONFIG_ARCH_RANDOM
        {
                .desc = "Random Number Generator",
                .capability = ARM64_HAS_RNG,
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64ISAR0_EL1,
                .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
        },
#endif
        {},
};
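In rough terms (an illustrative sketch, not the kernel's actual has_cpuid_feature() implementation), the match for the entry above boils down to extracting the unsigned 4-bit field named by .field_pos from .sys_reg and comparing it with .min_field_value:

/* Illustrative only: approximate check performed for the RNG entry above. */
#include <stdbool.h>
#include <stdint.h>

#define ID_AA64ISAR0_RNDR_SHIFT 60      /* same value the patch adds to sysreg.h */

static bool rng_entry_matches(uint64_t isar0)
{
        unsigned int rndr = (isar0 >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;

        return rndr >= 1;       /* .min_field_value = 1, FTR_UNSIGNED */
}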
@@ -1736,6 +1749,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
arch/arm64/kernel/cpuinfo.c

@@ -91,6 +91,7 @@ static const char *const hwcap_str[] = {
        "i8mm",
        "bf16",
        "dgh",
        "rng",
        NULL
};
arch/arm64/kernel/kaslr.c

@@ -120,6 +120,17 @@ u64 __init kaslr_early_init(u64 dt_phys)
                return 0;
        }

        /*
         * Mix in any entropy obtainable architecturally, open coded
         * since this runs extremely early.
         */
        if (__early_cpu_has_rndr()) {
                unsigned long raw;

                if (__arm64_rndr(&raw))
                        seed ^= raw;
        }

        if (!seed) {
                kaslr_status = KASLR_DISABLED_NO_SEED;
                return 0;