Merge changes from topic "bk/context_refactor" into integration

* changes:
  refactor(pmu): convert FEAT_MTPMU to C and move to persistent register init
  feat(pmu): introduce pmuv3 lib/extensions folder
  fix(pmu): make MDCR_EL3.MTPME=1 out of reset
  refactor(cm): introduce a real manage_extensions_nonsecure()
Manish Pandey 2023-06-29 12:31:26 +02:00 committed by TrustedFirmware Code Review
commit 26d67076e0
27 changed files with 434 additions and 341 deletions

View File

@ -523,6 +523,7 @@ BL_COMMON_SOURCES += common/bl_common.c \
drivers/console/multi_console.c \
lib/${ARCH}/cache_helpers.S \
lib/${ARCH}/misc_helpers.S \
lib/extensions/pmuv3/${ARCH}/pmuv3.c \
plat/common/plat_bl_common.c \
plat/common/plat_log_common.c \
plat/common/${ARCH}/plat_common.c \
@ -1147,7 +1148,6 @@ $(eval $(call assert_booleans,\
CTX_INCLUDE_FPREGS \
CTX_INCLUDE_EL2_REGS \
DEBUG \
DISABLE_MTPMU \
DYN_DISABLE_AUTH \
EL3_EXCEPTION_HANDLING \
ENABLE_AMU_AUXILIARY_COUNTERS \
@ -1225,6 +1225,7 @@ $(eval $(call assert_numerics,\
CTX_INCLUDE_MTE_REGS \
CTX_INCLUDE_NEVE_REGS \
CRYPTO_SUPPORT \
DISABLE_MTPMU \
ENABLE_BRBE_FOR_NS \
ENABLE_TRBE_FOR_NS \
ENABLE_BTI \

View File

@ -16,10 +16,6 @@ BL1_SOURCES += bl1/${ARCH}/bl1_arch_setup.c \
plat/common/${ARCH}/platform_up_stack.S \
${MBEDTLS_SOURCES}
ifeq (${DISABLE_MTPMU},1)
BL1_SOURCES += lib/extensions/mtpmu/${ARCH}/mtpmu.S
endif
ifeq (${ARCH},aarch64)
BL1_SOURCES += lib/cpus/aarch64/dsu_helpers.S \
lib/el3_runtime/aarch64/context.S

View File

@ -43,10 +43,6 @@ BL2_SOURCES += bl2/${ARCH}/bl2_el3_entrypoint.S \
bl2/${ARCH}/bl2_run_next_image.S \
lib/cpus/${ARCH}/cpu_helpers.S
ifeq (${DISABLE_MTPMU},1)
BL2_SOURCES += lib/extensions/mtpmu/${ARCH}/mtpmu.S
endif
ifeq (${ARCH},aarch64)
BL2_SOURCES += lib/cpus/aarch64/dsu_helpers.S
endif

View File

@ -54,10 +54,6 @@ BL31_SOURCES += bl31/bl31_main.c \
${SPMC_SOURCES} \
${SPM_SOURCES}
ifeq (${DISABLE_MTPMU},1)
BL31_SOURCES += lib/extensions/mtpmu/aarch64/mtpmu.S
endif
ifeq (${ENABLE_PMF}, 1)
BL31_SOURCES += lib/pmf/pmf_main.c
endif

View File

@ -112,6 +112,9 @@ void bl31_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
******************************************************************************/
void bl31_main(void)
{
/* Init registers that never change for the lifetime of TF-A */
cm_manage_extensions_el3();
NOTICE("BL31: %s\n", version_string);
NOTICE("BL31: %s\n", build_message);

View File

@ -20,10 +20,6 @@ BL32_SOURCES += bl32/sp_min/sp_min_main.c \
services/std_svc/std_svc_setup.c \
${PSCI_LIB_SOURCES}
ifeq (${DISABLE_MTPMU},1)
BL32_SOURCES += lib/extensions/mtpmu/aarch32/mtpmu.S
endif
ifeq (${ENABLE_PMF}, 1)
BL32_SOURCES += lib/pmf/pmf_main.c
endif

View File

@ -144,6 +144,14 @@ void detect_arch_features(void)
check_feature(ENABLE_FEAT_SB, read_feat_sb_id_field(), "SB", 1, 1);
check_feature(ENABLE_FEAT_CSV2_2, read_feat_csv2_id_field(),
"CSV2_2", 2, 3);
/*
 * Even though PMUv3 is an OPTIONAL feature, in practice it is always
 * implemented and Arm prescribes that it should be. So assume it is
 * present rather than adding a build flag for it. This check is used to
 * catch minor PMUv3px revisions as they come along.
 */
check_feature(FEAT_STATE_ALWAYS, read_feat_pmuv3_id_field(),
"PMUv3", 1, ID_AA64DFR0_PMUVER_PMUV3P7);
/* v8.1 features */
check_feature(ENABLE_FEAT_PAN, read_feat_pan_id_field(), "PAN", 1, 3);
@ -184,6 +192,13 @@ void detect_arch_features(void)
check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
"TWED", 1, 1);
/*
 * Even though this is a "DISABLE" flag, it confusingly performs
 * feature-enablement duties like every other flag here. Check it against
 * the HW feature whenever we intend to diverge from the default behaviour.
 */
check_feature(DISABLE_MTPMU, read_feat_mtpmu_id_field(), "MTPMU", 1, 1);
/* v8.7 features */
check_feature(ENABLE_FEAT_HCX, read_feat_hcx_id_field(), "HCX", 1, 1);
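
For orientation, a minimal sketch of the contract these call sites assume, inferred only from the arguments used above. The real check_feature() lives elsewhere in the tree; the body below is an assumption, not the actual implementation:

/*
 * Hedged sketch: a flag set to FEAT_STATE_ALWAYS must be backed by an ID
 * register field of at least `min`, and no flag may claim a feature version
 * above `max`. ERROR() is TF-A's standard logging macro.
 */
static void check_feature(int state, unsigned long field,
			  const char *feat_name, unsigned int min,
			  unsigned int max)
{
	if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
		ERROR("FEAT_%s not supported by the PE\n", feat_name);
	}
	if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
		ERROR("FEAT_%s is version %lu, above the expected maximum %u\n",
		      feat_name, field, max);
	}
}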

View File

@ -207,10 +207,10 @@ Common build options
of the binary image. If set to 1, then only the ELF image is built.
0 is the default.
- ``DISABLE_MTPMU``: Boolean option to disable FEAT_MTPMU if implemented
(Armv8.6 onwards). Its default value is 0 to keep consistency with platforms
that do not implement FEAT_MTPMU. For more information on FEAT_MTPMU,
check the latest Arm ARM.
- ``DISABLE_MTPMU``: Numeric option to disable ``FEAT_MTPMU`` (Multi-threaded
PMU). ``FEAT_MTPMU`` is an optional feature available from Armv8.6 onwards.
This flag can take the values 0 to 2, to align with the ``FEATURE_DETECTION``
mechanism. Default is ``0``. See the sketch after this excerpt for the
meaning of each value.
- ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
Board Boot authentication at runtime. This option is meant to be enabled only
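
As a hedged illustration of the three ``DISABLE_MTPMU`` values: the ``FEAT_STATE_*`` names below match their use in ``arch_features.h`` elsewhere in this change, while the numeric encoding is the usual ``FEATURE_DETECTION`` convention and is stated here as an assumption:

/* Assumed FEATURE_DETECTION encoding, as applied to DISABLE_MTPMU: */
#define FEAT_STATE_DISABLED	0	/* leave FEAT_MTPMU alone (default) */
#define FEAT_STATE_ALWAYS	1	/* disable it unconditionally */
#define FEAT_STATE_CHECK	2	/* disable it only if the ID registers
					 * report it as implemented */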

View File

@ -104,7 +104,11 @@
/* CSSELR definitions */
#define LEVEL_SHIFT U(1)
/* ID_DFR0_EL1 definitions */
/* ID_DFR0 definitions */
#define ID_DFR0_PERFMON_SHIFT U(24)
#define ID_DFR0_PERFMON_MASK U(0xf)
#define ID_DFR0_PERFMON_PMUV3 U(3)
#define ID_DFR0_PERFMON_PMUV3P5 U(6)
#define ID_DFR0_COPTRC_SHIFT U(12)
#define ID_DFR0_COPTRC_MASK U(0xf)
#define ID_DFR0_COPTRC_SUPPORTED U(1)
@ -118,6 +122,7 @@
#define ID_DFR1_MTPMU_SHIFT U(0)
#define ID_DFR1_MTPMU_MASK U(0xf)
#define ID_DFR1_MTPMU_SUPPORTED U(1)
#define ID_DFR1_MTPMU_DISABLED U(15)
/* ID_MMFR3 definitions */
#define ID_MMFR3_PAN_SHIFT U(16)
@ -464,6 +469,10 @@
#define PMCR_LP_BIT (U(1) << 7)
#define PMCR_LC_BIT (U(1) << 6)
#define PMCR_DP_BIT (U(1) << 5)
#define PMCR_X_BIT (U(1) << 4)
#define PMCR_C_BIT (U(1) << 2)
#define PMCR_P_BIT (U(1) << 1)
#define PMCR_E_BIT (U(1) << 0)
#define PMCR_RESET_VAL U(0x0)
/*******************************************************************************

View File

@ -162,4 +162,29 @@ static inline bool is_feat_s2pie_supported(void) { return false; }
static inline bool is_feat_s1pie_supported(void) { return false; }
static inline bool is_feat_sxpie_supported(void) { return false; }
static inline unsigned int read_feat_pmuv3_id_field(void)
{
return ISOLATE_FIELD(read_id_dfr0(), ID_DFR0_PERFMON);
}
static inline unsigned int read_feat_mtpmu_id_field(void)
{
return ISOLATE_FIELD(read_id_dfr1(), ID_DFR1_MTPMU);
}
static inline bool is_feat_mtpmu_supported(void)
{
if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
return false;
}
if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
return true;
}
unsigned int mtpmu = read_feat_mtpmu_id_field();
return mtpmu != 0U && mtpmu != ID_DFR1_MTPMU_DISABLED;
}
#endif /* ARCH_FEATURES_H */

View File

@ -221,6 +221,7 @@ DEFINE_COPROCR_READ_FUNC(midr, MIDR)
DEFINE_COPROCR_READ_FUNC(id_mmfr3, ID_MMFR3)
DEFINE_COPROCR_READ_FUNC(id_mmfr4, ID_MMFR4)
DEFINE_COPROCR_READ_FUNC(id_dfr0, ID_DFR0)
DEFINE_COPROCR_READ_FUNC(id_dfr1, ID_DFR1)
DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
DEFINE_COPROCR_READ_FUNC(isr, ISR)
@ -290,7 +291,7 @@ DEFINE_COPROCR_WRITE_FUNC_64(icc_asgi1r, ICC_ASGI1R_EL1_64)
DEFINE_COPROCR_RW_FUNCS(sdcr, SDCR)
DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL)
DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
DEFINE_COPROCR_RW_FUNCS(pmcr, PMCR)
/*
* Address translation

View File

@ -277,10 +277,6 @@
cps #MODE32_mon
isb
#if DISABLE_MTPMU
bl mtpmu_disable
#endif
.if \_warm_boot_mailbox
/* -------------------------------------------------------------
* This code will be executed for both warm and cold resets.

View File

@ -221,6 +221,12 @@
#define ID_AA64DFR0_TRACEFILT_MASK U(0xf)
#define ID_AA64DFR0_TRACEFILT_SUPPORTED U(1)
#define ID_AA64DFR0_TRACEFILT_LENGTH U(4)
#define ID_AA64DFR0_PMUVER_LENGTH U(4)
#define ID_AA64DFR0_PMUVER_SHIFT U(8)
#define ID_AA64DFR0_PMUVER_MASK U(0xf)
#define ID_AA64DFR0_PMUVER_PMUV3 U(1)
#define ID_AA64DFR0_PMUVER_PMUV3P7 U(7)
#define ID_AA64DFR0_PMUVER_IMP_DEF U(0xf)
/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
#define ID_AA64DFR0_PMS_SHIFT U(32)
@ -237,6 +243,7 @@
#define ID_AA64DFR0_MTPMU_SHIFT U(48)
#define ID_AA64DFR0_MTPMU_MASK ULL(0xf)
#define ID_AA64DFR0_MTPMU_SUPPORTED ULL(1)
#define ID_AA64DFR0_MTPMU_DISABLED ULL(15)
/* ID_AA64DFR0_EL1.BRBE definitions */
#define ID_AA64DFR0_BRBE_SHIFT U(52)
@ -595,16 +602,16 @@
#define MDCR_TDOSA_BIT (ULL(1) << 10)
#define MDCR_TDA_BIT (ULL(1) << 9)
#define MDCR_TPM_BIT (ULL(1) << 6)
#define MDCR_EL3_RESET_VAL ULL(0x0)
#define MDCR_EL3_RESET_VAL MDCR_MTPME_BIT
/* MDCR_EL2 definitions */
#define MDCR_EL2_MTPME (U(1) << 28)
#define MDCR_EL2_HLP (U(1) << 26)
#define MDCR_EL2_HLP_BIT (U(1) << 26)
#define MDCR_EL2_E2TB(x) ((x) << 24)
#define MDCR_EL2_E2TB_EL1 U(0x3)
#define MDCR_EL2_HCCD (U(1) << 23)
#define MDCR_EL2_HCCD_BIT (U(1) << 23)
#define MDCR_EL2_TTRF (U(1) << 19)
#define MDCR_EL2_HPMD (U(1) << 17)
#define MDCR_EL2_HPMD_BIT (U(1) << 17)
#define MDCR_EL2_TPMS (U(1) << 14)
#define MDCR_EL2_E2PB(x) ((x) << 12)
#define MDCR_EL2_E2PB_EL1 U(0x3)
@ -615,6 +622,7 @@
#define MDCR_EL2_HPME_BIT (U(1) << 7)
#define MDCR_EL2_TPM_BIT (U(1) << 6)
#define MDCR_EL2_TPMCR_BIT (U(1) << 5)
#define MDCR_EL2_HPMN_MASK U(0x1f)
#define MDCR_EL2_RESET_VAL U(0x0)
/* HSTR_EL2 definitions */

View File

@ -639,6 +639,7 @@ static inline bool is_feat_trbe_supported(void)
return read_feat_trbe_id_field() != 0U;
}
/*******************************************************************************
* Function to identify the presence of FEAT_SMEx (Scalable Matrix Extension)
******************************************************************************/
@ -699,4 +700,29 @@ static inline unsigned int read_id_aa64mmfr0_el0_tgran64_field(void)
ID_AA64MMFR0_EL1_TGRAN64);
}
static inline unsigned int read_feat_pmuv3_id_field(void)
{
return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_PMUVER);
}
static inline unsigned int read_feat_mtpmu_id_field(void)
{
return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_MTPMU);
}
static inline bool is_feat_mtpmu_supported(void)
{
if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
return false;
}
if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
return true;
}
unsigned int mtpmu = read_feat_mtpmu_id_field();
return (mtpmu != 0U) && (mtpmu != ID_AA64DFR0_MTPMU_DISABLED);
}
#endif /* ARCH_FEATURES_H */
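
Since DISABLE_MTPMU is a build-time constant, the first two branches above fold away at compile time and the ID register read only survives in a DISABLE_MTPMU=2 build. The effective per-build behaviour, summarised for illustration:

/*
 * DISABLE_MTPMU=0: is_feat_mtpmu_supported() == false, MTPMU left alone.
 * DISABLE_MTPMU=1: is_feat_mtpmu_supported() == true, always disabled.
 * DISABLE_MTPMU=2: ID_AA64DFR0_EL1.MTPMU is read and MTPMU is disabled only
 * when the field is neither 0 (not implemented) nor 0xf (disabled).
 */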

View File

@ -103,7 +103,7 @@
*/
mov_imm x0, ((MDCR_EL2_RESET_VAL | \
MDCR_SPD32(MDCR_SPD32_DISABLE)) \
& ~(MDCR_EL2_HPMD | MDCR_TDOSA_BIT | \
& ~(MDCR_EL2_HPMD_BIT | MDCR_TDOSA_BIT | \
MDCR_TDA_BIT | MDCR_TPM_BIT))
msr mdcr_el2, x0
@ -244,10 +244,6 @@
isb
.endif /* _init_sctlr */
#if DISABLE_MTPMU
bl mtpmu_disable
#endif
.if \_warm_boot_mailbox
/* -------------------------------------------------------------
* This code will be executed for both warm and cold resets.

View File

@ -119,22 +119,6 @@
* MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
* accesses to all Performance Monitors registers do not trap to EL3.
*
* MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
* prohibited in Secure state. This bit is RES0 in versions of the
* architecture with FEAT_PMUv3p5 not implemented, setting it to 1
* doesn't have any effect on them.
*
* MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
* prohibited in EL3. This bit is RES0 in versions of the
* architecture with FEAT_PMUv3p7 not implemented, setting it to 1
* doesn't have any effect on them.
*
* MDCR_EL3.SPME: Set to zero so that event counting by the programmable
* counters PMEVCNTR<n>_EL0 is prohibited in Secure state. If ARMv8.2
* Debug is not implemented this bit does not have any effect on the
* counters unless there is support for the implementation defined
* authentication interface ExternalSecureNoninvasiveDebugEnabled().
*
* MDCR_EL3.NSTB, MDCR_EL3.NSTBE: Set to zero so that Trace Buffer
* owning security state is Secure state. If FEAT_TRBE is implemented,
* accesses to Trace Buffer control registers at EL2 and EL1 in any
@ -149,10 +133,9 @@
* ---------------------------------------------------------------------
*/
mov_imm x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | \
MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT | \
MDCR_MCCD_BIT) & ~(MDCR_SPME_BIT | MDCR_TDOSA_BIT | \
MDCR_TDA_BIT | MDCR_TPM_BIT | MDCR_NSTB(MDCR_NSTB_EL1) | \
MDCR_NSTBE | MDCR_TTRF_BIT))
MDCR_SPD32(MDCR_SPD32_DISABLE)) & \
~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT | \
MDCR_NSTB(MDCR_NSTB_EL1) | MDCR_NSTBE | MDCR_TTRF_BIT))
mrs x1, id_aa64dfr0_el1
ubfx x1, x1, #ID_AA64DFR0_TRACEFILT_SHIFT, #ID_AA64DFR0_TRACEFILT_LENGTH
@ -161,36 +144,6 @@
1:
msr mdcr_el3, x0
/* ---------------------------------------------------------------------
* Initialise PMCR_EL0 setting all fields rather than relying
* on hw. Some fields are architecturally UNKNOWN on reset.
*
* PMCR_EL0.LP: Set to one so that event counter overflow, that
* is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
* that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
* is implemented. This bit is RES0 in versions of the architecture
* earlier than ARMv8.5, setting it to 1 doesn't have any effect
* on them.
*
* PMCR_EL0.LC: Set to one so that cycle counter overflow, that
* is recorded in PMOVSCLR_EL0[31], occurs on the increment
* that changes PMCCNTR_EL0[63] from 1 to 0.
*
* PMCR_EL0.DP: Set to one so that the cycle counter,
* PMCCNTR_EL0 does not count when event counting is prohibited.
*
* PMCR_EL0.X: Set to zero to disable export of events.
*
* PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
* counts on every clock cycle.
* ---------------------------------------------------------------------
*/
mov_imm x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
msr pmcr_el0, x0
/* ---------------------------------------------------------------------
* Enable External Aborts and SError Interrupts now that the exception
* vectors have been setup.
@ -340,10 +293,6 @@
isb
.endif /* _init_sctlr */
#if DISABLE_MTPMU
bl mtpmu_disable
#endif
.if \_warm_boot_mailbox
/* -------------------------------------------------------------
* This code will be executed for both warm and cold resets.

View File

@ -37,6 +37,9 @@ void cm_prepare_el3_exit(uint32_t security_state);
void cm_prepare_el3_exit_ns(void);
#ifdef __aarch64__
#if IMAGE_BL31
void cm_manage_extensions_el3(void);
#endif
#if CTX_INCLUDE_EL2_REGS
void cm_el2_sysregs_context_save(uint32_t security_state);
void cm_el2_sysregs_context_restore(uint32_t security_state);
@ -84,6 +87,7 @@ static inline void cm_set_next_context(void *context)
#else
void *cm_get_next_context(void);
void cm_set_next_context(void *context);
static inline void cm_manage_extensions_el3(void) {}
#endif /* __aarch64__ */
#endif /* CONTEXT_MGMT_H */

View File

@ -0,0 +1,19 @@
/*
* Copyright (c) 2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PMUV3_H
#define PMUV3_H
#include <context.h>
void pmuv3_disable_el3(void);
#ifdef __aarch64__
void pmuv3_enable(cpu_context_t *ctx);
void pmuv3_init_el2_unused(void);
#endif /* __aarch64__ */
#endif /* PMUV3_H */
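
To show how this small API is wired up, here is a condensed view of the call sites introduced elsewhere in this change (the bodies below abbreviate later hunks in this diff rather than adding new behaviour):

/* EL3-lifetime state: written once and never changed while TF-A runs. */
void cm_manage_extensions_el3(void)
{
	pmuv3_disable_el3();
}

/* Per-context Non-secure state, applied when a context is set up (BL31). */
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
	pmuv3_enable(ctx);
}

/* EL2 implemented but empty: give it benign PMU-related MDCR_EL2 settings. */
static void manage_extensions_nonsecure_el2_unused(void)
{
	pmuv3_init_el2_unused();
}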

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -17,6 +17,7 @@
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>
@ -147,6 +148,12 @@ static void enable_extensions_nonsecure(bool el2_unused)
if (is_feat_trf_supported()) {
trf_enable();
}
/*
* This also applies to PMU versions < v3. The PMU is disabled only for EL3
* and Secure state execution; lower NS ELs are unaffected.
*/
pmuv3_disable_el3();
#endif
}

View File

@ -568,6 +568,8 @@ endfunc fpregs_context_restore
stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
mrs x18, sp_el0
str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
/* PMUv3 is presumed to be always present */
mrs x9, pmcr_el0
str x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
/* Disable cycle counter when event counting is prohibited */
@ -651,6 +653,8 @@ func restore_gp_pmcr_pauth_regs
msr APGAKeyLo_EL1, x8
msr APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */
/* PMUv3 is presumed to be always present */
ldr x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
msr pmcr_el0, x0
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

View File

@ -24,6 +24,7 @@
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
@ -37,6 +38,7 @@
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */
static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);
static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
@ -265,16 +267,6 @@ static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *
write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2,
icc_sre_el2);
/*
* Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
* throw anyone off who expects this to be sensible.
* TODO: A similar thing happens in cm_prepare_el3_exit. They should be
* unified with the proper PMU implementation
*/
u_register_t mdcr_el2 = ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) &
PMCR_EL0_N_MASK);
write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);
if (is_feat_hcx_supported()) {
/*
* Initialize register HCRX_EL2 with its init value.
@ -288,6 +280,8 @@ static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *
HCRX_EL2_INIT_VAL);
}
#endif /* CTX_INCLUDE_EL2_REGS */
manage_extensions_nonsecure(ctx);
}
/*******************************************************************************
@ -504,9 +498,11 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
/*******************************************************************************
* Enable architecture extensions on first entry to Non-secure world.
* When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
* it is zero.
* it is zero. This function updates registers in-place; its contents are
* gradually being moved out to cm_manage_extensions_el3 and
* cm_manage_extensions_nonsecure.
******************************************************************************/
static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
static void manage_extensions_nonsecure_mixed(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
if (is_feat_spe_supported()) {
@ -548,6 +544,39 @@ static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
#endif
}
/*******************************************************************************
* Enable architecture extensions for EL3 execution. This function only updates
* registers in-place which are expected to either never change or be
* overwritten by el3_exit.
******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
pmuv3_disable_el3();
}
#endif /* IMAGE_BL31 */
/*******************************************************************************
* Enable architecture extensions on first entry to Non-secure world.
******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}
/*******************************************************************************
* Enable architecture extensions in-place at EL2 on first entry to Non-secure
* world when EL2 is empty and unused.
******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
pmuv3_init_el2_unused();
#endif /* IMAGE_BL31 */
}
/*******************************************************************************
* Enable architecture extensions on first entry to Secure world.
******************************************************************************/
@ -758,24 +787,11 @@ void cm_prepare_el3_exit(uint32_t security_state)
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
* MDCR_EL2.HLP: Set to one so that event counter
* overflow, that is recorded in PMOVSCLR_EL0[0-30],
* occurs on the increment that changes
* PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
* implemented. This bit is RES0 in versions of the
* architecture earlier than ARMv8.5, setting it to 1
* doesn't have any effect on them.
*
* MDCR_EL2.TTRF: Set to zero so that access to Trace
* Filter Control register TRFCR_EL1 at EL1 is not
* trapped to EL2. This bit is RES0 in versions of
* the architecture earlier than ARMv8.4.
*
* MDCR_EL2.HPMD: Set to one so that event counting is
* prohibited at EL2. This bit is RES0 in versions of
* the architecture earlier than ARMv8.1, setting it
* to 1 doesn't have any effect on them.
*
* MDCR_EL2.TPMS: Set to zero so that accesses to
* Statistical Profiling control registers from EL1
* do not trap to EL2. This bit is RES0 when SPE is
@ -795,35 +811,15 @@ void cm_prepare_el3_exit(uint32_t security_state)
* MDCR_EL2.TDE: Set to zero so that debug exceptions
* are not routed to EL2.
*
* MDCR_EL2.HPME: Set to zero to disable EL2 Performance
* Monitors.
*
* MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
* EL1 accesses to all Performance Monitors registers
* are not trapped to EL2.
*
* MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
* and EL1 accesses to the PMCR_EL0 or PMCR are not
* trapped to EL2.
*
* MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
* architecturally-defined reset value.
*
* MDCR_EL2.E2TB: Set to zero so that the trace Buffer
* owning exception level is NS-EL1 and, tracing is
* prohibited at NS-EL2. These bits are RES0 when
* FEAT_TRBE is not implemented.
*/
mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
MDCR_EL2_HPMD) |
((read_pmcr_el0() & PMCR_EL0_N_BITS)
>> PMCR_EL0_N_SHIFT)) &
~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
mdcr_el2 = ((MDCR_EL2_RESET_VAL) & ~(MDCR_EL2_TTRF |
MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
MDCR_EL2_TPMCR_BIT |
MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));
MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1)));
write_mdcr_el2(mdcr_el2);
@ -845,8 +841,10 @@ void cm_prepare_el3_exit(uint32_t security_state)
*/
write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
~(CNTHP_CTL_ENABLE_BIT));
manage_extensions_nonsecure_el2_unused();
}
manage_extensions_nonsecure(el2_unused, ctx);
manage_extensions_nonsecure_mixed(el2_unused, ctx);
}
cm_el1_sysregs_context_restore(security_state);
@ -1167,7 +1165,7 @@ void cm_prepare_el3_exit_ns(void)
* direct register updates. Therefore, do this here
* instead of when setting up context.
*/
manage_extensions_nonsecure(0, ctx);
manage_extensions_nonsecure_mixed(0, ctx);
/*
* Set the NS bit to be able to access the ICC_SRE_EL2

View File

@ -1,105 +0,0 @@
/*
* Copyright (c) 2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
.global mtpmu_disable
/* -------------------------------------------------------------
* The functions in this file are called at entrypoint, before
* the CPU has decided whether this is a cold or a warm boot.
* Therefore there is no stack yet to rely on for a C function
* call.
* -------------------------------------------------------------
*/
/*
* bool mtpmu_supported(void)
*
* Return a boolean indicating whether FEAT_MTPMU is supported or not.
*
* Trash registers: r0.
*/
func mtpmu_supported
ldcopr r0, ID_DFR1
and r0, r0, #(ID_DFR1_MTPMU_MASK >> ID_DFR1_MTPMU_SHIFT)
cmp r0, #ID_DFR1_MTPMU_SUPPORTED
mov r0, #0
addeq r0, r0, #1
bx lr
endfunc mtpmu_supported
/*
* bool el_implemented(unsigned int el)
*
* Return a boolean indicating if the specified EL (2 or 3) is implemented.
*
* Trash registers: r0
*/
func el_implemented
cmp r0, #3
ldcopr r0, ID_PFR1
lsreq r0, r0, #ID_PFR1_SEC_SHIFT
lsrne r0, r0, #ID_PFR1_VIRTEXT_SHIFT
/*
* ID_PFR1_VIRTEXT_MASK is the same as ID_PFR1_SEC_MASK
* so use any one of them
*/
and r0, r0, #ID_PFR1_VIRTEXT_MASK
cmp r0, #ID_PFR1_ELx_ENABLED
mov r0, #0
addeq r0, r0, #1
bx lr
endfunc el_implemented
/*
* void mtpmu_disable(void)
*
* Disable mtpmu feature if supported.
*
* Trash register: r0, r1, r2
*/
func mtpmu_disable
mov r2, lr
bl mtpmu_supported
cmp r0, #0
bxeq r2 /* FEAT_MTPMU not supported */
/* FEAT_MTPMU supported */
mov r0, #3
bl el_implemented
cmp r0, #0
beq 1f
/* EL3 implemented */
ldcopr r0, SDCR
ldr r1, =SDCR_MTPME_BIT
bic r0, r0, r1
stcopr r0, SDCR
/*
* If EL3 is implemented, HDCR.MTPME is implemented as Res0 and
* FEAT_MTPMU is controlled only from EL3, so no need to perform
* any operations for EL2.
*/
isb
bx r2
1:
/* EL3 not implemented */
mov r0, #2
bl el_implemented
cmp r0, #0
bxeq r2 /* No EL2 or EL3 implemented */
/* EL2 implemented */
ldcopr r0, HDCR
ldr r1, =HDCR_MTPME_BIT
orr r0, r0, r1
stcopr r0, HDCR
isb
bx r2
endfunc mtpmu_disable

View File

@ -1,96 +0,0 @@
/*
* Copyright (c) 2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
.global mtpmu_disable
/* -------------------------------------------------------------
* The functions in this file are called at entrypoint, before
* the CPU has decided whether this is a cold or a warm boot.
* Therefore there is no stack yet to rely on for a C function
* call.
* -------------------------------------------------------------
*/
/*
* bool mtpmu_supported(void)
*
* Return a boolean indicating whether FEAT_MTPMU is supported or not.
*
* Trash registers: x0, x1
*/
func mtpmu_supported
mrs x0, id_aa64dfr0_el1
mov_imm x1, ID_AA64DFR0_MTPMU_MASK
and x0, x1, x0, LSR #ID_AA64DFR0_MTPMU_SHIFT
cmp x0, ID_AA64DFR0_MTPMU_SUPPORTED
cset x0, eq
ret
endfunc mtpmu_supported
/*
* bool el_implemented(unsigned int el_shift)
*
* Return a boolean indicating if the specified EL is implemented.
* The EL is represented as the bitmask shift on id_aa64pfr0_el1 register.
*
* Trash registers: x0, x1
*/
func el_implemented
mrs x1, id_aa64pfr0_el1
lsr x1, x1, x0
cmp x1, #ID_AA64PFR0_ELX_MASK
cset x0, eq
ret
endfunc el_implemented
/*
* void mtpmu_disable(void)
*
* Disable mtpmu feature if supported.
*
* Trash register: x0, x1, x30
*/
func mtpmu_disable
mov x10, x30
bl mtpmu_supported
cbz x0, exit_disable
/* FEAT_MTPMU supported */
mov_imm x0, ID_AA64PFR0_EL3_SHIFT
bl el_implemented
cbz x0, 1f
/* EL3 implemented */
mrs x0, mdcr_el3
mov_imm x1, MDCR_MTPME_BIT
bic x0, x0, x1
msr mdcr_el3, x0
/*
* If EL3 is implemented, MDCR_EL2.MTPME is implemented as Res0 and
* FEAT_MTPMU is controlled only from EL3, so no need to perform
* any operations for EL2.
*/
isb
exit_disable:
ret x10
1:
/* EL3 not implemented */
mov_imm x0, ID_AA64PFR0_EL2_SHIFT
bl el_implemented
cbz x0, exit_disable
/* EL2 implemented */
mrs x0, mdcr_el2
mov_imm x1, MDCR_EL2_MTPME
bic x0, x0, x1
msr mdcr_el2, x0
isb
ret x10
endfunc mtpmu_disable

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/extensions/pmuv3.h>
static u_register_t mtpmu_disable_el3(u_register_t sdcr)
{
if (!is_feat_mtpmu_supported()) {
return sdcr;
}
/*
* SDCR.MTPME = 0
* FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>.MT is
* zero.
*/
sdcr &= ~SDCR_MTPME_BIT;
return sdcr;
}
/*
* This applies to all PMU versions. The name PMUv3 is kept for consistency
* with aarch64 and to avoid clashing with platforms that reuse the PMU name.
*/
void pmuv3_disable_el3(void)
{
u_register_t sdcr = read_sdcr();
/* ---------------------------------------------------------------------
* Initialise SDCR, setting all the fields rather than relying on hw.
*
* SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
* in Secure state. This bit is RES0 in versions of the architecture
* earlier than ARMv8.5
*
* SDCR.SPME: Set to zero so that event counting is prohibited in Secure
* state (and explicitly EL3 with later revisions). If ARMv8.2 Debug is
* not implemented this bit does not have any effect on the counters
* unless there is support for the implementation defined
* authentication interface ExternalSecureNoninvasiveDebugEnabled().
* ---------------------------------------------------------------------
*/
sdcr = (sdcr | SDCR_SCCD_BIT) & ~SDCR_SPME_BIT;
sdcr = mtpmu_disable_el3(sdcr);
write_sdcr(sdcr);
/* ---------------------------------------------------------------------
* Initialise PMCR, setting all fields rather than relying
* on hw. Some fields are architecturally UNKNOWN on reset.
*
* PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
*
* PMCR.X: Set to zero to disable export of events.
*
* PMCR.C: Set to one to reset PMCCNTR.
*
* PMCR.P: Set to one to reset each event counter PMEVCNTR<n> to zero.
*
* PMCR.E: Set to zero to disable cycle and event counters.
* ---------------------------------------------------------------------
*/
write_pmcr((read_pmcr() | PMCR_DP_BIT | PMCR_C_BIT | PMCR_P_BIT) &
~(PMCR_X_BIT | PMCR_E_BIT));
}

View File

@ -0,0 +1,170 @@
/*
* Copyright (c) 2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/extensions/pmuv3.h>
static u_register_t init_mdcr_el2_hpmn(u_register_t mdcr_el2)
{
/*
* Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
* throw anyone off who expects this to be sensible.
*/
mdcr_el2 &= ~MDCR_EL2_HPMN_MASK;
mdcr_el2 |= ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK);
return mdcr_el2;
}
void pmuv3_enable(cpu_context_t *ctx)
{
#if CTX_INCLUDE_EL2_REGS
u_register_t mdcr_el2;
mdcr_el2 = read_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2);
mdcr_el2 = init_mdcr_el2_hpmn(mdcr_el2);
write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);
#endif /* CTX_INCLUDE_EL2_REGS */
}
static u_register_t mtpmu_disable_el3(u_register_t mdcr_el3)
{
if (!is_feat_mtpmu_supported()) {
return mdcr_el3;
}
/*
* MDCR_EL3.MTPME = 0
* FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
* zero.
*/
mdcr_el3 &= ~MDCR_MTPME_BIT;
return mdcr_el3;
}
void pmuv3_disable_el3(void)
{
u_register_t mdcr_el3 = read_mdcr_el3();
/* ---------------------------------------------------------------------
* Initialise MDCR_EL3, setting all fields rather than relying on hw.
* Some fields are architecturally UNKNOWN on reset.
*
* MDCR_EL3.MPMX: Set to zero to not affect event counters (when
* SPME = 0).
*
* MDCR_EL3.MCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
* prohibited in EL3. This bit is RES0 in versions of the
* architecture with FEAT_PMUv3p7 not implemented.
*
* MDCR_EL3.SCCD: Set to one so that cycle counting by PMCCNTR_EL0 is
* prohibited in Secure state. This bit is RES0 in versions of the
* architecture with FEAT_PMUv3p5 not implemented.
*
* MDCR_EL3.SPME: Set to zero so that event counting is prohibited in
* Secure state (and explicitly EL3 with later revisions). If ARMv8.2
* Debug is not implemented this bit does not have any effect on the
* counters unless there is support for the implementation defined
* authentication interface ExternalSecureNoninvasiveDebugEnabled().
*
* The SPME/MPMX combination is a little tricky. Below is a small
* summary if another combination is ever needed:
* SPME | MPMX | secure world | EL3
* -------------------------------------
* 0 | 0 | disabled | disabled
* 1 | 0 | enabled | enabled
* 0 | 1 | enabled | disabled
* 1 | 1 | enabled | disabled only for counters 0 to
* MDCR_EL2.HPMN - 1. Enabled for the rest
*/
mdcr_el3 = (mdcr_el3 | MDCR_SCCD_BIT | MDCR_MCCD_BIT) &
~(MDCR_MPMX_BIT | MDCR_SPME_BIT);
mdcr_el3 = mtpmu_disable_el3(mdcr_el3);
write_mdcr_el3(mdcr_el3);
/* ---------------------------------------------------------------------
* Initialise PMCR_EL0 setting all fields rather than relying
* on hw. Some fields are architecturally UNKNOWN on reset.
*
* PMCR_EL0.DP: Set to one so that the cycle counter,
* PMCCNTR_EL0 does not count when event counting is prohibited.
* Necessary on PMUv3 <= p7 where MDCR_EL3.{SCCD,MCCD} are not
* available
*
* PMCR_EL0.X: Set to zero to disable export of events.
*
* PMCR_EL0.C: Set to one to reset PMCCNTR_EL0 to zero.
*
* PMCR_EL0.P: Set to one to reset each event counter PMEVCNTR<n>_EL0 to
* zero.
*
* PMCR_EL0.E: Set to zero to disable cycle and event counters.
* ---------------------------------------------------------------------
*/
write_pmcr_el0((read_pmcr_el0() | PMCR_EL0_DP_BIT | PMCR_EL0_C_BIT |
PMCR_EL0_P_BIT) & ~(PMCR_EL0_X_BIT | PMCR_EL0_E_BIT));
}
static u_register_t mtpmu_disable_el2(u_register_t mdcr_el2)
{
if (!is_feat_mtpmu_supported()) {
return mdcr_el2;
}
/*
* MDCR_EL2.MTPME = 0
* FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
* zero.
*/
mdcr_el2 &= ~MDCR_EL2_MTPME;
return mdcr_el2;
}
void pmuv3_init_el2_unused(void)
{
u_register_t mdcr_el2 = read_mdcr_el2();
/*
* Initialise MDCR_EL2, setting all fields rather than
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
* MDCR_EL2.HLP: Set to one so that event counter overflow, that is
* recorded in PMOVSCLR_EL0[0-30], occurs on the increment that changes
* PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is implemented.
* This bit is RES0 in versions of the architecture earlier than
* ARMv8.5, setting it to 1 doesn't have any effect on them.
*
* MDCR_EL2.HCCD: Set to one to prohibit cycle counting at EL2. This bit
* is RES0 in versions of the architecture with FEAT_PMUv3p5 not
* implemented.
*
* MDCR_EL2.HPMD: Set to one so that event counting is
* prohibited at EL2 for counter n < MDCR_EL2.HPMN. This bit is RES0
* in versions of the architecture with FEAT_PMUv3p1 not implemented.
*
* MDCR_EL2.HPME: Set to zero to disable event counters for counters
* n >= MDCR_EL2.HPMN.
*
* MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
* EL1 accesses to all Performance Monitors registers
* are not trapped to EL2.
*
* MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
* and EL1 accesses to the PMCR_EL0 or PMCR are not
* trapped to EL2.
*/
mdcr_el2 = (mdcr_el2 | MDCR_EL2_HLP_BIT | MDCR_EL2_HPMD_BIT |
MDCR_EL2_HCCD_BIT) &
~(MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT | MDCR_EL2_TPMCR_BIT);
mdcr_el2 = init_mdcr_el2_hpmn(mdcr_el2);
mdcr_el2 = mtpmu_disable_el2(mdcr_el2);
write_mdcr_el2(mdcr_el2);
}

View File

@ -985,6 +985,9 @@ void psci_warmboot_entrypoint(void)
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
/* Init registers that never change for the lifetime of TF-A */
cm_manage_extensions_el3();
/*
* Verify that we have been explicitly turned ON or resumed from
* suspend.

View File

@ -18,6 +18,8 @@
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/spinlock.h>
@ -125,6 +127,8 @@ static void manage_extensions_realm(cpu_context_t *ctx)
*/
sve_enable(ctx);
}
pmuv3_enable(ctx);
}
/*******************************************************************************