refactor(cm): convert el1-ctx assembly offset entries to c structure

Currently, the EL1 part of the context structure (el1_sysregs_t)
is coupled with feature flags, reducing the context memory allocation
for platforms that don't enable/support all the architectural
features at once.

Similar to the EL2 context optimization (commit "d6af234"), this patch
further improves this section by converting the assembly context-offset
entries into a C structure. It relies on the linker's garbage collection
to remove unreferenced structures from memory, and it also aids
readability and future maintenance. Additionally, it eliminates the use
of #if guards in the 'context_mgmt.c' source file.

Change-Id: If6075931cec994bc89231241337eccc7042c5ede
Signed-off-by: Jayanth Dodderi Chidanand <jayanthdodderi.chidanand@arm.com>
Jayanth Dodderi Chidanand 2024-04-11 11:09:12 +01:00
parent 59b7c0a03f
commit 42e35d2f8c
10 changed files with 464 additions and 366 deletions
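As a quick illustration (a sketch only, not part of the diff), the change moves call sites from hand-maintained byte offsets into the EL1 save area to typed struct members accessed through feature-grouped macros:

/* Before: offset-based access into the EL1 register save area. */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SPSR_EL1, read_spsr_el1());

/* After: named member access via the new accessor macros; sub-structures
 * for disabled features are compiled out of el1_sysregs_t entirely. */
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), spsr_el1, read_spsr_el1());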


@@ -7,6 +7,7 @@
#ifndef CONTEXT_H
#define CONTEXT_H
#include <lib/el3_runtime/context_el1.h>
#include <lib/el3_runtime/context_el2.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/utils_def.h>
@@ -81,152 +82,11 @@
#define CTX_EL3STATE_END U(0x50) /* Align to the next 16 byte boundary */
#endif /* FFH_SUPPORT */
/*******************************************************************************
* Constants that allow assembler code to access members of and the
* 'el1_sys_regs' structure at their correct offsets. Note that some of the
* registers are only 32-bits wide but are stored as 64-bit values for
* convenience
******************************************************************************/
#define CTX_EL1_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#define CTX_SPSR_EL1 U(0x0)
#define CTX_ELR_EL1 U(0x8)
#define CTX_SCTLR_EL1 U(0x10)
#define CTX_TCR_EL1 U(0x18)
#define CTX_CPACR_EL1 U(0x20)
#define CTX_CSSELR_EL1 U(0x28)
#define CTX_SP_EL1 U(0x30)
#define CTX_ESR_EL1 U(0x38)
#define CTX_TTBR0_EL1 U(0x40)
#define CTX_TTBR1_EL1 U(0x48)
#define CTX_MAIR_EL1 U(0x50)
#define CTX_AMAIR_EL1 U(0x58)
#define CTX_ACTLR_EL1 U(0x60)
#define CTX_TPIDR_EL1 U(0x68)
#define CTX_TPIDR_EL0 U(0x70)
#define CTX_TPIDRRO_EL0 U(0x78)
#define CTX_PAR_EL1 U(0x80)
#define CTX_FAR_EL1 U(0x88)
#define CTX_AFSR0_EL1 U(0x90)
#define CTX_AFSR1_EL1 U(0x98)
#define CTX_CONTEXTIDR_EL1 U(0xa0)
#define CTX_VBAR_EL1 U(0xa8)
#define CTX_MDCCINT_EL1 U(0xb0)
#define CTX_MDSCR_EL1 U(0xb8)
#define CTX_AARCH64_END U(0xc0) /* Align to the next 16 byte boundary */
/*
* If the platform is AArch64-only, there is no need to save and restore these
* AArch32 registers.
*/
#if CTX_INCLUDE_AARCH32_REGS
#define CTX_SPSR_ABT (CTX_AARCH64_END + U(0x0))
#define CTX_SPSR_UND (CTX_AARCH64_END + U(0x8))
#define CTX_SPSR_IRQ (CTX_AARCH64_END + U(0x10))
#define CTX_SPSR_FIQ (CTX_AARCH64_END + U(0x18))
#define CTX_DACR32_EL2 (CTX_AARCH64_END + U(0x20))
#define CTX_IFSR32_EL2 (CTX_AARCH64_END + U(0x28))
#define CTX_AARCH32_END (CTX_AARCH64_END + U(0x30)) /* Align to the next 16 byte boundary */
#else
#define CTX_AARCH32_END CTX_AARCH64_END
#endif /* CTX_INCLUDE_AARCH32_REGS */
/*
* If the timer registers aren't saved and restored, we don't have to reserve
* space for them in the context
*/
#if NS_TIMER_SWITCH
#define CTX_CNTP_CTL_EL0 (CTX_AARCH32_END + U(0x0))
#define CTX_CNTP_CVAL_EL0 (CTX_AARCH32_END + U(0x8))
#define CTX_CNTV_CTL_EL0 (CTX_AARCH32_END + U(0x10))
#define CTX_CNTV_CVAL_EL0 (CTX_AARCH32_END + U(0x18))
#define CTX_CNTKCTL_EL1 (CTX_AARCH32_END + U(0x20))
#define CTX_TIMER_SYSREGS_END (CTX_AARCH32_END + U(0x30)) /* Align to the next 16 byte boundary */
#else
#define CTX_TIMER_SYSREGS_END CTX_AARCH32_END
#endif /* NS_TIMER_SWITCH */
#if ENABLE_FEAT_MTE2
#define CTX_TFSRE0_EL1 (CTX_TIMER_SYSREGS_END + U(0x0))
#define CTX_TFSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x8))
#define CTX_RGSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x10))
#define CTX_GCR_EL1 (CTX_TIMER_SYSREGS_END + U(0x18))
#define CTX_MTE_REGS_END (CTX_TIMER_SYSREGS_END + U(0x20)) /* Align to the next 16 byte boundary */
#else
#define CTX_MTE_REGS_END CTX_TIMER_SYSREGS_END
#endif /* ENABLE_FEAT_MTE2 */
#if ENABLE_FEAT_RAS
#define CTX_DISR_EL1 (CTX_MTE_REGS_END + U(0x0))
#define CTX_RAS_REGS_END (CTX_MTE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_RAS_REGS_END CTX_MTE_REGS_END
#endif /* ENABLE_FEAT_RAS */
#if ENABLE_FEAT_S1PIE
#define CTX_PIRE0_EL1 (CTX_RAS_REGS_END + U(0x0))
#define CTX_PIR_EL1 (CTX_RAS_REGS_END + U(0x8))
#define CTX_S1PIE_REGS_END (CTX_RAS_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_S1PIE_REGS_END CTX_RAS_REGS_END
#endif /* ENABLE_FEAT_S1PIE */
#if ENABLE_FEAT_S1POE
#define CTX_POR_EL1 (CTX_S1PIE_REGS_END + U(0x0))
#define CTX_S1POE_REGS_END (CTX_S1PIE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_S1POE_REGS_END CTX_S1PIE_REGS_END
#endif /* ENABLE_FEAT_S1POE */
#if ENABLE_FEAT_S2POE
#define CTX_S2POR_EL1 (CTX_S1POE_REGS_END + U(0x0))
#define CTX_S2POE_REGS_END (CTX_S1POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_S2POE_REGS_END CTX_S1POE_REGS_END
#endif /* ENABLE_FEAT_S2POE */
#if ENABLE_FEAT_TCR2
#define CTX_TCR2_EL1 (CTX_S2POE_REGS_END + U(0x0))
#define CTX_TCR2_REGS_END (CTX_S2POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_TCR2_REGS_END CTX_S2POE_REGS_END
#endif /* ENABLE_FEAT_TCR2 */
#if ENABLE_TRF_FOR_NS
#define CTX_TRFCR_EL1 (CTX_TCR2_REGS_END + U(0x0))
#define CTX_TRF_REGS_END (CTX_TCR2_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_TRF_REGS_END CTX_TCR2_REGS_END
#endif /* ENABLE_TRF_FOR_NS */
#if ENABLE_FEAT_CSV2_2
#define CTX_SCXTNUM_EL0 (CTX_TRF_REGS_END + U(0x0))
#define CTX_SCXTNUM_EL1 (CTX_TRF_REGS_END + U(0x8))
#define CTX_CSV2_2_REGS_END (CTX_TRF_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_CSV2_2_REGS_END CTX_TRF_REGS_END
#endif /* ENABLE_FEAT_CSV2_2 */
#if ENABLE_FEAT_GCS
#define CTX_GCSCR_EL1 (CTX_CSV2_2_REGS_END + U(0x0))
#define CTX_GCSCRE0_EL1 (CTX_CSV2_2_REGS_END + U(0x8))
#define CTX_GCSPR_EL1 (CTX_CSV2_2_REGS_END + U(0x10))
#define CTX_GCSPR_EL0 (CTX_CSV2_2_REGS_END + U(0x18))
#define CTX_GCS_REGS_END (CTX_CSV2_2_REGS_END + U(0x20)) /* Align to the next 16 byte boundary */
#else
#define CTX_GCS_REGS_END CTX_CSV2_2_REGS_END
#endif /* ENABLE_FEAT_GCS */
/*
* End of EL1 system registers.
*/
#define CTX_EL1_SYSREGS_END CTX_GCS_REGS_END
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'fp_regs'
* structure at their correct offsets.
******************************************************************************/
# define CTX_FPREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
# define CTX_FPREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#if CTX_INCLUDE_FPREGS
#define CTX_FP_Q0 U(0x0)
#define CTX_FP_Q1 U(0x10)
@@ -369,7 +229,6 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
#define CTX_EL1_SYSREGS_ALL (CTX_EL1_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
# define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
@@ -393,12 +252,6 @@
*/
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during world switches.
*/
DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL);
/*
* AArch64 floating point register context structure for preserving
* the floating point state during switches from one security state to
@@ -446,7 +299,6 @@ DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL);
typedef struct cpu_context {
gp_regs_t gpregs_ctx;
el3_state_t el3state_ctx;
el1_sysregs_t el1_sysregs_ctx;
#if CTX_INCLUDE_FPREGS
fp_regs_t fpregs_ctx;
@@ -461,6 +313,8 @@ typedef struct cpu_context {
pauth_t pauth_ctx;
#endif
el1_sysregs_t el1_sysregs_ctx;
#if CTX_INCLUDE_EL2_REGS
el2_sysregs_t el2_sysregs_ctx;
#endif
@@ -510,9 +364,6 @@ CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx),
CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
assert_core_context_el3state_offset_mismatch);
CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx),
assert_core_context_el1_sys_offset_mismatch);
#if CTX_INCLUDE_FPREGS
CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
assert_core_context_fp_offset_mismatch);


@@ -0,0 +1,272 @@
/*
* Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef CONTEXT_EL1_H
#define CONTEXT_EL1_H
#ifndef __ASSEMBLER__
/*******************************************************************************
* EL1 Registers:
* AArch64 EL1 system register context structure for preserving the
* architectural state during world switches.
******************************************************************************/
typedef struct el1_common_regs {
uint64_t spsr_el1;
uint64_t elr_el1;
#if (!ERRATA_SPECULATIVE_AT)
uint64_t sctlr_el1;
uint64_t tcr_el1;
#endif /* ERRATA_SPECULATIVE_AT=0 */
uint64_t cpacr_el1;
uint64_t csselr_el1;
uint64_t sp_el1;
uint64_t esr_el1;
uint64_t ttbr0_el1;
uint64_t ttbr1_el1;
uint64_t mair_el1;
uint64_t amair_el1;
uint64_t actlr_el1;
uint64_t tpidr_el1;
uint64_t tpidr_el0;
uint64_t tpidrro_el0;
uint64_t par_el1;
uint64_t far_el1;
uint64_t afsr0_el1;
uint64_t afsr1_el1;
uint64_t contextidr_el1;
uint64_t vbar_el1;
uint64_t mdccint_el1;
uint64_t mdscr_el1;
} el1_common_regs_t;
typedef struct el1_aarch32_regs {
uint64_t spsr_abt;
uint64_t spsr_und;
uint64_t spsr_irq;
uint64_t spsr_fiq;
uint64_t dacr32_el2;
uint64_t ifsr32_el2;
} el1_aarch32_regs_t;
typedef struct el1_arch_timer_regs {
uint64_t cntp_ctl_el0;
uint64_t cntp_cval_el0;
uint64_t cntv_ctl_el0;
uint64_t cntv_cval_el0;
uint64_t cntkctl_el1;
} el1_arch_timer_regs_t;
typedef struct el1_mte2_regs {
uint64_t tfsre0_el1;
uint64_t tfsr_el1;
uint64_t rgsr_el1;
uint64_t gcr_el1;
} el1_mte2_regs_t;
typedef struct el1_ras_regs {
uint64_t disr_el1;
} el1_ras_regs_t;
typedef struct el1_s1pie_regs {
uint64_t pire0_el1;
uint64_t pir_el1;
} el1_s1pie_regs_t;
typedef struct el1_s1poe_regs {
uint64_t por_el1;
} el1_s1poe_regs_t;
typedef struct el1_s2poe_regs {
uint64_t s2por_el1;
} el1_s2poe_regs_t;
typedef struct el1_tcr2_regs {
uint64_t tcr2_el1;
} el1_tcr2_regs_t;
typedef struct el1_trf_regs {
uint64_t trfcr_el1;
} el1_trf_regs_t;
typedef struct el1_csv2_2_regs {
uint64_t scxtnum_el0;
uint64_t scxtnum_el1;
} el1_csv2_2_regs_t;
typedef struct el1_gcs_regs {
uint64_t gcscr_el1;
uint64_t gcscre0_el1;
uint64_t gcspr_el1;
uint64_t gcspr_el0;
} el1_gcs_regs_t;
typedef struct el1_sysregs {
el1_common_regs_t common;
#if CTX_INCLUDE_AARCH32_REGS
el1_aarch32_regs_t el1_aarch32;
#endif
#if NS_TIMER_SWITCH
el1_arch_timer_regs_t arch_timer;
#endif
#if ENABLE_FEAT_MTE2
el1_mte2_regs_t mte2;
#endif
#if ENABLE_FEAT_RAS
el1_ras_regs_t ras;
#endif
#if ENABLE_FEAT_S1PIE
el1_s1pie_regs_t s1pie;
#endif
#if ENABLE_FEAT_S1POE
el1_s1poe_regs_t s1poe;
#endif
#if ENABLE_FEAT_S2POE
el1_s2poe_regs_t s2poe;
#endif
#if ENABLE_FEAT_TCR2
el1_tcr2_regs_t tcr2;
#endif
#if ENABLE_TRF_FOR_NS
el1_trf_regs_t trf;
#endif
#if ENABLE_FEAT_CSV2_2
el1_csv2_2_regs_t csv2_2;
#endif
#if ENABLE_FEAT_GCS
el1_gcs_regs_t gcs;
#endif
} el1_sysregs_t;
/*
* Macros to access members related to individual features of the el1_sysregs_t
* structures.
*/
#define read_el1_ctx_common(ctx, reg) (((ctx)->common).reg)
#define write_el1_ctx_common(ctx, reg, val) ((((ctx)->common).reg) \
= (uint64_t) (val))
#if NS_TIMER_SWITCH
#define read_el1_ctx_arch_timer(ctx, reg) (((ctx)->arch_timer).reg)
#define write_el1_ctx_arch_timer(ctx, reg, val) ((((ctx)->arch_timer).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_arch_timer(ctx, reg) ULL(0)
#define write_el1_ctx_arch_timer(ctx, reg, val)
#endif /* NS_TIMER_SWITCH */
#if CTX_INCLUDE_AARCH32_REGS
#define read_el1_ctx_aarch32(ctx, reg) (((ctx)->el1_aarch32).reg)
#define write_el1_ctx_aarch32(ctx, reg, val) ((((ctx)->el1_aarch32).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_aarch32(ctx, reg) ULL(0)
#define write_el1_ctx_aarch32(ctx, reg, val)
#endif /* CTX_INCLUDE_AARCH32_REGS */
#if ENABLE_FEAT_MTE2
#define read_el1_ctx_mte2(ctx, reg) (((ctx)->mte2).reg)
#define write_el1_ctx_mte2(ctx, reg, val) ((((ctx)->mte2).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_mte2(ctx, reg) ULL(0)
#define write_el1_ctx_mte2(ctx, reg, val)
#endif /* ENABLE_FEAT_MTE2 */
#if ENABLE_FEAT_RAS
#define read_el1_ctx_ras(ctx, reg) (((ctx)->ras).reg)
#define write_el1_ctx_ras(ctx, reg, val) ((((ctx)->ras).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_ras(ctx, reg) ULL(0)
#define write_el1_ctx_ras(ctx, reg, val)
#endif /* ENABLE_FEAT_RAS */
#if ENABLE_FEAT_S1PIE
#define read_el1_ctx_s1pie(ctx, reg) (((ctx)->s1pie).reg)
#define write_el1_ctx_s1pie(ctx, reg, val) ((((ctx)->s1pie).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_s1pie(ctx, reg) ULL(0)
#define write_el1_ctx_s1pie(ctx, reg, val)
#endif /* ENABLE_FEAT_S1PIE */
#if ENABLE_FEAT_S1POE
#define read_el1_ctx_s1poe(ctx, reg) (((ctx)->s1poe).reg)
#define write_el1_ctx_s1poe(ctx, reg, val) ((((ctx)->s1poe).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_s1poe(ctx, reg) ULL(0)
#define write_el1_ctx_s1poe(ctx, reg, val)
#endif /* ENABLE_FEAT_S1POE */
#if ENABLE_FEAT_S2POE
#define read_el1_ctx_s2poe(ctx, reg) (((ctx)->s2poe).reg)
#define write_el1_ctx_s2poe(ctx, reg, val) ((((ctx)->s2poe).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_s2poe(ctx, reg) ULL(0)
#define write_el1_ctx_s2poe(ctx, reg, val)
#endif /* ENABLE_FEAT_S2POE */
#if ENABLE_FEAT_TCR2
#define read_el1_ctx_tcr2(ctx, reg) (((ctx)->tcr2).reg)
#define write_el1_ctx_tcr2(ctx, reg, val) ((((ctx)->tcr2).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_tcr2(ctx, reg) ULL(0)
#define write_el1_ctx_tcr2(ctx, reg, val)
#endif /* ENABLE_FEAT_TCR2 */
#if ENABLE_TRF_FOR_NS
#define read_el1_ctx_trf(ctx, reg) (((ctx)->trf).reg)
#define write_el1_ctx_trf(ctx, reg, val) ((((ctx)->trf).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_trf(ctx, reg) ULL(0)
#define write_el1_ctx_trf(ctx, reg, val)
#endif /* ENABLE_TRF_FOR_NS */
#if ENABLE_FEAT_CSV2_2
#define read_el1_ctx_csv2_2(ctx, reg) (((ctx)->csv2_2).reg)
#define write_el1_ctx_csv2_2(ctx, reg, val) ((((ctx)->csv2_2).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_csv2_2(ctx, reg) ULL(0)
#define write_el1_ctx_csv2_2(ctx, reg, val)
#endif /* ENABLE_FEAT_CSV2_2 */
#if ENABLE_FEAT_GCS
#define read_el1_ctx_gcs(ctx, reg) (((ctx)->gcs).reg)
#define write_el1_ctx_gcs(ctx, reg, val) ((((ctx)->gcs).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_gcs(ctx, reg) ULL(0)
#define write_el1_ctx_gcs(ctx, reg, val)
#endif /* ENABLE_FEAT_GCS */
/******************************************************************************/
#endif /* __ASSEMBLER__ */
#endif /* CONTEXT_EL1_H */
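As an aside, the new header follows one repeating pattern per feature. A minimal sketch of how a further feature-gated register would be added (ENABLE_FEAT_FOO and foo_el1 are hypothetical names used only for illustration, not part of this patch):

typedef struct el1_foo_regs {
	uint64_t foo_el1;
} el1_foo_regs_t;

/* Conditional member inside el1_sysregs_t. */
#if ENABLE_FEAT_FOO
	el1_foo_regs_t foo;
#endif

/* Accessor pair with zero/no-op fallbacks when the feature is disabled. */
#if ENABLE_FEAT_FOO
#define read_el1_ctx_foo(ctx, reg)		(((ctx)->foo).reg)
#define write_el1_ctx_foo(ctx, reg, val)	((((ctx)->foo).reg) = (uint64_t) (val))
#else
#define read_el1_ctx_foo(ctx, reg)		ULL(0)
#define write_el1_ctx_foo(ctx, reg, val)
#endif /* ENABLE_FEAT_FOO */

Call sites can then guard accesses with a runtime is_feat_*_supported() check instead of #if, since the disabled-feature macros compile away.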


@@ -97,7 +97,7 @@ static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx), CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_elx);
#else
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_elx);
#endif /* ERRATA_SPECULATIVE_AT */
/*
@@ -108,7 +108,7 @@ static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info
* be zero.
*/
actlr_elx = read_actlr_el1();
write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), actlr_el1, actlr_elx);
}
/******************************************************************************
@@ -1553,220 +1553,192 @@ void cm_prepare_el3_exit_ns(void)
static void el1_sysregs_context_save(el1_sysregs_t *ctx)
{
write_ctx_reg(ctx, CTX_SPSR_EL1, read_spsr_el1());
write_ctx_reg(ctx, CTX_ELR_EL1, read_elr_el1());
write_el1_ctx_common(ctx, spsr_el1, read_spsr_el1());
write_el1_ctx_common(ctx, elr_el1, read_elr_el1());
#if (!ERRATA_SPECULATIVE_AT)
write_ctx_reg(ctx, CTX_SCTLR_EL1, read_sctlr_el1());
write_ctx_reg(ctx, CTX_TCR_EL1, read_tcr_el1());
write_el1_ctx_common(ctx, sctlr_el1, read_sctlr_el1());
write_el1_ctx_common(ctx, tcr_el1, read_tcr_el1());
#endif /* (!ERRATA_SPECULATIVE_AT) */
write_ctx_reg(ctx, CTX_CPACR_EL1, read_cpacr_el1());
write_ctx_reg(ctx, CTX_CSSELR_EL1, read_csselr_el1());
write_ctx_reg(ctx, CTX_SP_EL1, read_sp_el1());
write_ctx_reg(ctx, CTX_ESR_EL1, read_esr_el1());
write_ctx_reg(ctx, CTX_TTBR0_EL1, read_ttbr0_el1());
write_ctx_reg(ctx, CTX_TTBR1_EL1, read_ttbr1_el1());
write_ctx_reg(ctx, CTX_MAIR_EL1, read_mair_el1());
write_ctx_reg(ctx, CTX_AMAIR_EL1, read_amair_el1());
write_ctx_reg(ctx, CTX_ACTLR_EL1, read_actlr_el1());
write_ctx_reg(ctx, CTX_TPIDR_EL1, read_tpidr_el1());
write_ctx_reg(ctx, CTX_TPIDR_EL0, read_tpidr_el0());
write_ctx_reg(ctx, CTX_TPIDRRO_EL0, read_tpidrro_el0());
write_ctx_reg(ctx, CTX_PAR_EL1, read_par_el1());
write_ctx_reg(ctx, CTX_FAR_EL1, read_far_el1());
write_ctx_reg(ctx, CTX_AFSR0_EL1, read_afsr0_el1());
write_ctx_reg(ctx, CTX_AFSR1_EL1, read_afsr1_el1());
write_ctx_reg(ctx, CTX_CONTEXTIDR_EL1, read_contextidr_el1());
write_ctx_reg(ctx, CTX_VBAR_EL1, read_vbar_el1());
write_ctx_reg(ctx, CTX_MDCCINT_EL1, read_mdccint_el1());
write_ctx_reg(ctx, CTX_MDSCR_EL1, read_mdscr_el1());
write_el1_ctx_common(ctx, cpacr_el1, read_cpacr_el1());
write_el1_ctx_common(ctx, csselr_el1, read_csselr_el1());
write_el1_ctx_common(ctx, sp_el1, read_sp_el1());
write_el1_ctx_common(ctx, esr_el1, read_esr_el1());
write_el1_ctx_common(ctx, ttbr0_el1, read_ttbr0_el1());
write_el1_ctx_common(ctx, ttbr1_el1, read_ttbr1_el1());
write_el1_ctx_common(ctx, mair_el1, read_mair_el1());
write_el1_ctx_common(ctx, amair_el1, read_amair_el1());
write_el1_ctx_common(ctx, actlr_el1, read_actlr_el1());
write_el1_ctx_common(ctx, tpidr_el1, read_tpidr_el1());
write_el1_ctx_common(ctx, tpidr_el0, read_tpidr_el0());
write_el1_ctx_common(ctx, tpidrro_el0, read_tpidrro_el0());
write_el1_ctx_common(ctx, par_el1, read_par_el1());
write_el1_ctx_common(ctx, far_el1, read_far_el1());
write_el1_ctx_common(ctx, afsr0_el1, read_afsr0_el1());
write_el1_ctx_common(ctx, afsr1_el1, read_afsr1_el1());
write_el1_ctx_common(ctx, contextidr_el1, read_contextidr_el1());
write_el1_ctx_common(ctx, vbar_el1, read_vbar_el1());
write_el1_ctx_common(ctx, mdccint_el1, read_mdccint_el1());
write_el1_ctx_common(ctx, mdscr_el1, read_mdscr_el1());
#if CTX_INCLUDE_AARCH32_REGS
write_ctx_reg(ctx, CTX_SPSR_ABT, read_spsr_abt());
write_ctx_reg(ctx, CTX_SPSR_UND, read_spsr_und());
write_ctx_reg(ctx, CTX_SPSR_IRQ, read_spsr_irq());
write_ctx_reg(ctx, CTX_SPSR_FIQ, read_spsr_fiq());
write_ctx_reg(ctx, CTX_DACR32_EL2, read_dacr32_el2());
write_ctx_reg(ctx, CTX_IFSR32_EL2, read_ifsr32_el2());
#endif /* CTX_INCLUDE_AARCH32_REGS */
if (CTX_INCLUDE_AARCH32_REGS) {
/* Save Aarch32 registers */
write_el1_ctx_aarch32(ctx, spsr_abt, read_spsr_abt());
write_el1_ctx_aarch32(ctx, spsr_und, read_spsr_und());
write_el1_ctx_aarch32(ctx, spsr_irq, read_spsr_irq());
write_el1_ctx_aarch32(ctx, spsr_fiq, read_spsr_fiq());
write_el1_ctx_aarch32(ctx, dacr32_el2, read_dacr32_el2());
write_el1_ctx_aarch32(ctx, ifsr32_el2, read_ifsr32_el2());
}
#if NS_TIMER_SWITCH
write_ctx_reg(ctx, CTX_CNTP_CTL_EL0, read_cntp_ctl_el0());
write_ctx_reg(ctx, CTX_CNTP_CVAL_EL0, read_cntp_cval_el0());
write_ctx_reg(ctx, CTX_CNTV_CTL_EL0, read_cntv_ctl_el0());
write_ctx_reg(ctx, CTX_CNTV_CVAL_EL0, read_cntv_cval_el0());
write_ctx_reg(ctx, CTX_CNTKCTL_EL1, read_cntkctl_el1());
#endif /* NS_TIMER_SWITCH */
if (NS_TIMER_SWITCH) {
/* Save NS Timer registers */
write_el1_ctx_arch_timer(ctx, cntp_ctl_el0, read_cntp_ctl_el0());
write_el1_ctx_arch_timer(ctx, cntp_cval_el0, read_cntp_cval_el0());
write_el1_ctx_arch_timer(ctx, cntv_ctl_el0, read_cntv_ctl_el0());
write_el1_ctx_arch_timer(ctx, cntv_cval_el0, read_cntv_cval_el0());
write_el1_ctx_arch_timer(ctx, cntkctl_el1, read_cntkctl_el1());
}
#if ENABLE_FEAT_MTE2
write_ctx_reg(ctx, CTX_TFSRE0_EL1, read_tfsre0_el1());
write_ctx_reg(ctx, CTX_TFSR_EL1, read_tfsr_el1());
write_ctx_reg(ctx, CTX_RGSR_EL1, read_rgsr_el1());
write_ctx_reg(ctx, CTX_GCR_EL1, read_gcr_el1());
#endif /* ENABLE_FEAT_MTE2 */
if (is_feat_mte2_supported()) {
write_el1_ctx_mte2(ctx, tfsre0_el1, read_tfsre0_el1());
write_el1_ctx_mte2(ctx, tfsr_el1, read_tfsr_el1());
write_el1_ctx_mte2(ctx, rgsr_el1, read_rgsr_el1());
write_el1_ctx_mte2(ctx, gcr_el1, read_gcr_el1());
}
#if ENABLE_FEAT_RAS
if (is_feat_ras_supported()) {
write_ctx_reg(ctx, CTX_DISR_EL1, read_disr_el1());
write_el1_ctx_ras(ctx, disr_el1, read_disr_el1());
}
#endif
#if ENABLE_FEAT_S1PIE
if (is_feat_s1pie_supported()) {
write_ctx_reg(ctx, CTX_PIRE0_EL1, read_pire0_el1());
write_ctx_reg(ctx, CTX_PIR_EL1, read_pir_el1());
write_el1_ctx_s1pie(ctx, pire0_el1, read_pire0_el1());
write_el1_ctx_s1pie(ctx, pir_el1, read_pir_el1());
}
#endif
#if ENABLE_FEAT_S1POE
if (is_feat_s1poe_supported()) {
write_ctx_reg(ctx, CTX_POR_EL1, read_por_el1());
write_el1_ctx_s1poe(ctx, por_el1, read_por_el1());
}
#endif
#if ENABLE_FEAT_S2POE
if (is_feat_s2poe_supported()) {
write_ctx_reg(ctx, CTX_S2POR_EL1, read_s2por_el1());
write_el1_ctx_s2poe(ctx, s2por_el1, read_s2por_el1());
}
#endif
#if ENABLE_FEAT_TCR2
if (is_feat_tcr2_supported()) {
write_ctx_reg(ctx, CTX_TCR2_EL1, read_tcr2_el1());
write_el1_ctx_tcr2(ctx, tcr2_el1, read_tcr2_el1());
}
#endif
#if ENABLE_TRF_FOR_NS
if (is_feat_trf_supported()) {
write_ctx_reg(ctx, CTX_TRFCR_EL1, read_trfcr_el1());
write_el1_ctx_trf(ctx, trfcr_el1, read_trfcr_el1());
}
#endif
#if ENABLE_FEAT_CSV2_2
if (is_feat_csv2_2_supported()) {
write_ctx_reg(ctx, CTX_SCXTNUM_EL0, read_scxtnum_el0());
write_ctx_reg(ctx, CTX_SCXTNUM_EL1, read_scxtnum_el1());
write_el1_ctx_csv2_2(ctx, scxtnum_el0, read_scxtnum_el0());
write_el1_ctx_csv2_2(ctx, scxtnum_el1, read_scxtnum_el1());
}
#endif
#if ENABLE_FEAT_GCS
if (is_feat_gcs_supported()) {
write_ctx_reg(ctx, CTX_GCSCR_EL1, read_gcscr_el1());
write_ctx_reg(ctx, CTX_GCSCRE0_EL1, read_gcscre0_el1());
write_ctx_reg(ctx, CTX_GCSPR_EL1, read_gcspr_el1());
write_ctx_reg(ctx, CTX_GCSPR_EL0, read_gcspr_el0());
write_el1_ctx_gcs(ctx, gcscr_el1, read_gcscr_el1());
write_el1_ctx_gcs(ctx, gcscre0_el1, read_gcscre0_el1());
write_el1_ctx_gcs(ctx, gcspr_el1, read_gcspr_el1());
write_el1_ctx_gcs(ctx, gcspr_el0, read_gcspr_el0());
}
#endif
}
static void el1_sysregs_context_restore(el1_sysregs_t *ctx)
{
write_spsr_el1(read_ctx_reg(ctx, CTX_SPSR_EL1));
write_elr_el1(read_ctx_reg(ctx, CTX_ELR_EL1));
write_spsr_el1(read_el1_ctx_common(ctx, spsr_el1));
write_elr_el1(read_el1_ctx_common(ctx, elr_el1));
#if (!ERRATA_SPECULATIVE_AT)
write_sctlr_el1(read_ctx_reg(ctx, CTX_SCTLR_EL1));
write_tcr_el1(read_ctx_reg(ctx, CTX_TCR_EL1));
write_sctlr_el1(read_el1_ctx_common(ctx, sctlr_el1));
write_tcr_el1(read_el1_ctx_common(ctx, tcr_el1));
#endif /* (!ERRATA_SPECULATIVE_AT) */
write_cpacr_el1(read_ctx_reg(ctx, CTX_CPACR_EL1));
write_csselr_el1(read_ctx_reg(ctx, CTX_CSSELR_EL1));
write_sp_el1(read_ctx_reg(ctx, CTX_SP_EL1));
write_esr_el1(read_ctx_reg(ctx, CTX_ESR_EL1));
write_ttbr0_el1(read_ctx_reg(ctx, CTX_TTBR0_EL1));
write_ttbr1_el1(read_ctx_reg(ctx, CTX_TTBR1_EL1));
write_mair_el1(read_ctx_reg(ctx, CTX_MAIR_EL1));
write_amair_el1(read_ctx_reg(ctx, CTX_AMAIR_EL1));
write_actlr_el1(read_ctx_reg(ctx, CTX_ACTLR_EL1));
write_tpidr_el1(read_ctx_reg(ctx, CTX_TPIDR_EL1));
write_tpidr_el0(read_ctx_reg(ctx, CTX_TPIDR_EL0));
write_tpidrro_el0(read_ctx_reg(ctx, CTX_TPIDRRO_EL0));
write_par_el1(read_ctx_reg(ctx, CTX_PAR_EL1));
write_far_el1(read_ctx_reg(ctx, CTX_FAR_EL1));
write_afsr0_el1(read_ctx_reg(ctx, CTX_AFSR0_EL1));
write_afsr1_el1(read_ctx_reg(ctx, CTX_AFSR1_EL1));
write_contextidr_el1(read_ctx_reg(ctx, CTX_CONTEXTIDR_EL1));
write_vbar_el1(read_ctx_reg(ctx, CTX_VBAR_EL1));
write_mdccint_el1(read_ctx_reg(ctx, CTX_MDCCINT_EL1));
write_mdscr_el1(read_ctx_reg(ctx, CTX_MDSCR_EL1));
write_cpacr_el1(read_el1_ctx_common(ctx, cpacr_el1));
write_csselr_el1(read_el1_ctx_common(ctx, csselr_el1));
write_sp_el1(read_el1_ctx_common(ctx, sp_el1));
write_esr_el1(read_el1_ctx_common(ctx, esr_el1));
write_ttbr0_el1(read_el1_ctx_common(ctx, ttbr0_el1));
write_ttbr1_el1(read_el1_ctx_common(ctx, ttbr1_el1));
write_mair_el1(read_el1_ctx_common(ctx, mair_el1));
write_amair_el1(read_el1_ctx_common(ctx, amair_el1));
write_actlr_el1(read_el1_ctx_common(ctx, actlr_el1));
write_tpidr_el1(read_el1_ctx_common(ctx, tpidr_el1));
write_tpidr_el0(read_el1_ctx_common(ctx, tpidr_el0));
write_tpidrro_el0(read_el1_ctx_common(ctx, tpidrro_el0));
write_par_el1(read_el1_ctx_common(ctx, par_el1));
write_far_el1(read_el1_ctx_common(ctx, far_el1));
write_afsr0_el1(read_el1_ctx_common(ctx, afsr0_el1));
write_afsr1_el1(read_el1_ctx_common(ctx, afsr1_el1));
write_contextidr_el1(read_el1_ctx_common(ctx, contextidr_el1));
write_vbar_el1(read_el1_ctx_common(ctx, vbar_el1));
write_mdccint_el1(read_el1_ctx_common(ctx, mdccint_el1));
write_mdscr_el1(read_el1_ctx_common(ctx, mdscr_el1));
#if CTX_INCLUDE_AARCH32_REGS
write_spsr_abt(read_ctx_reg(ctx, CTX_SPSR_ABT));
write_spsr_und(read_ctx_reg(ctx, CTX_SPSR_UND));
write_spsr_irq(read_ctx_reg(ctx, CTX_SPSR_IRQ));
write_spsr_fiq(read_ctx_reg(ctx, CTX_SPSR_FIQ));
write_dacr32_el2(read_ctx_reg(ctx, CTX_DACR32_EL2));
write_ifsr32_el2(read_ctx_reg(ctx, CTX_IFSR32_EL2));
#endif /* CTX_INCLUDE_AARCH32_REGS */
if (CTX_INCLUDE_AARCH32_REGS) {
/* Restore Aarch32 registers */
write_spsr_abt(read_el1_ctx_aarch32(ctx, spsr_abt));
write_spsr_und(read_el1_ctx_aarch32(ctx, spsr_und));
write_spsr_irq(read_el1_ctx_aarch32(ctx, spsr_irq));
write_spsr_fiq(read_el1_ctx_aarch32(ctx, spsr_fiq));
write_dacr32_el2(read_el1_ctx_aarch32(ctx, dacr32_el2));
write_ifsr32_el2(read_el1_ctx_aarch32(ctx, ifsr32_el2));
}
#if NS_TIMER_SWITCH
write_cntp_ctl_el0(read_ctx_reg(ctx, CTX_CNTP_CTL_EL0));
write_cntp_cval_el0(read_ctx_reg(ctx, CTX_CNTP_CVAL_EL0));
write_cntv_ctl_el0(read_ctx_reg(ctx, CTX_CNTV_CTL_EL0));
write_cntv_cval_el0(read_ctx_reg(ctx, CTX_CNTV_CVAL_EL0));
write_cntkctl_el1(read_ctx_reg(ctx, CTX_CNTKCTL_EL1));
#endif /* NS_TIMER_SWITCH */
if (NS_TIMER_SWITCH) {
/* Restore NS Timer registers */
write_cntp_ctl_el0(read_el1_ctx_arch_timer(ctx, cntp_ctl_el0));
write_cntp_cval_el0(read_el1_ctx_arch_timer(ctx, cntp_cval_el0));
write_cntv_ctl_el0(read_el1_ctx_arch_timer(ctx, cntv_ctl_el0));
write_cntv_cval_el0(read_el1_ctx_arch_timer(ctx, cntv_cval_el0));
write_cntkctl_el1(read_el1_ctx_arch_timer(ctx, cntkctl_el1));
}
#if ENABLE_FEAT_MTE2
write_tfsre0_el1(read_ctx_reg(ctx, CTX_TFSRE0_EL1));
write_tfsr_el1(read_ctx_reg(ctx, CTX_TFSR_EL1));
write_rgsr_el1(read_ctx_reg(ctx, CTX_RGSR_EL1));
write_gcr_el1(read_ctx_reg(ctx, CTX_GCR_EL1));
#endif /* ENABLE_FEAT_MTE2 */
if (is_feat_mte2_supported()) {
write_tfsre0_el1(read_el1_ctx_mte2(ctx, tfsre0_el1));
write_tfsr_el1(read_el1_ctx_mte2(ctx, tfsr_el1));
write_rgsr_el1(read_el1_ctx_mte2(ctx, rgsr_el1));
write_gcr_el1(read_el1_ctx_mte2(ctx, gcr_el1));
}
#if ENABLE_FEAT_RAS
if (is_feat_ras_supported()) {
write_disr_el1(read_ctx_reg(ctx, CTX_DISR_EL1));
write_disr_el1(read_el1_ctx_ras(ctx, disr_el1));
}
#endif
#if ENABLE_FEAT_S1PIE
if (is_feat_s1pie_supported()) {
write_pire0_el1(read_ctx_reg(ctx, CTX_PIRE0_EL1));
write_pir_el1(read_ctx_reg(ctx, CTX_PIR_EL1));
write_pire0_el1(read_el1_ctx_s1pie(ctx, pire0_el1));
write_pir_el1(read_el1_ctx_s1pie(ctx, pir_el1));
}
#endif
#if ENABLE_FEAT_S1POE
if (is_feat_s1poe_supported()) {
write_por_el1(read_ctx_reg(ctx, CTX_POR_EL1));
write_por_el1(read_el1_ctx_s1poe(ctx, por_el1));
}
#endif
#if ENABLE_FEAT_S2POE
if (is_feat_s2poe_supported()) {
write_s2por_el1(read_ctx_reg(ctx, CTX_S2POR_EL1));
write_s2por_el1(read_el1_ctx_s2poe(ctx, s2por_el1));
}
#endif
#if ENABLE_FEAT_TCR2
if (is_feat_tcr2_supported()) {
write_tcr2_el1(read_ctx_reg(ctx, CTX_TCR2_EL1));
write_tcr2_el1(read_el1_ctx_tcr2(ctx, tcr2_el1));
}
#endif
#if ENABLE_TRF_FOR_NS
if (is_feat_trf_supported()) {
write_trfcr_el1(read_ctx_reg(ctx, CTX_TRFCR_EL1));
write_trfcr_el1(read_el1_ctx_trf(ctx, trfcr_el1));
}
#endif
#if ENABLE_FEAT_CSV2_2
if (is_feat_csv2_2_supported()) {
write_scxtnum_el0(read_ctx_reg(ctx, CTX_SCXTNUM_EL0));
write_scxtnum_el1(read_ctx_reg(ctx, CTX_SCXTNUM_EL1));
write_scxtnum_el0(read_el1_ctx_csv2_2(ctx, scxtnum_el0));
write_scxtnum_el1(read_el1_ctx_csv2_2(ctx, scxtnum_el1));
}
#endif
#if ENABLE_FEAT_GCS
if (is_feat_gcs_supported()) {
write_gcscr_el1(read_ctx_reg(ctx, CTX_GCSCR_EL1));
write_gcscre0_el1(read_ctx_reg(ctx, CTX_GCSCRE0_EL1));
write_gcspr_el1(read_ctx_reg(ctx, CTX_GCSPR_EL1));
write_gcspr_el0(read_ctx_reg(ctx, CTX_GCSPR_EL0));
write_gcscr_el1(read_el1_ctx_gcs(ctx, gcscr_el1));
write_gcscre0_el1(read_el1_ctx_gcs(ctx, gcscre0_el1));
write_gcspr_el1(read_el1_ctx_gcs(ctx, gcspr_el1));
write_gcspr_el0(read_el1_ctx_gcs(ctx, gcspr_el0));
}
#endif
}
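/*
 * Aside (sketch, not part of this patch): the caller path is unchanged and
 * still goes through the pre-existing context-management wrappers; only the
 * accesses inside el1_sysregs_context_{save,restore} moved from assembly
 * offsets to C struct members. Assumed caller names below are the existing
 * cm_* API, e.g. on a world switch:
 *
 *	cm_el1_sysregs_context_save(NON_SECURE);	// save outgoing world's EL1 state
 *	cm_el1_sysregs_context_restore(SECURE);		// restore incoming world's EL1 state
 *	cm_set_next_eret_context(SECURE);
 */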
/*******************************************************************************


@@ -62,47 +62,50 @@ static void populate_cpu_err_data(cpu_err_info *cpu_info,
cpu_info->SecurityState = security_state;
/* populate CPU EL1 context information. */
cpu_info->ErrCtxEl1Reg[0] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_ELR_EL1);
cpu_info->ErrCtxEl1Reg[1] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_ESR_EL1);
cpu_info->ErrCtxEl1Reg[2] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_FAR_EL1);
cpu_info->ErrCtxEl1Reg[0] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
elr_el1);
cpu_info->ErrCtxEl1Reg[1] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
esr_el1);
cpu_info->ErrCtxEl1Reg[2] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
far_el1);
cpu_info->ErrCtxEl1Reg[3] = read_isr_el1();
cpu_info->ErrCtxEl1Reg[4] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_MAIR_EL1);
cpu_info->ErrCtxEl1Reg[4] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
mair_el1);
cpu_info->ErrCtxEl1Reg[5] = read_midr_el1();
cpu_info->ErrCtxEl1Reg[6] = read_mpidr_el1();
#if (ERRATA_SPECULATIVE_AT)
cpu_info->ErrCtxEl1Reg[7] = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1);
#else
cpu_info->ErrCtxEl1Reg[7] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SCTLR_EL1);
cpu_info->ErrCtxEl1Reg[7] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
sctlr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
cpu_info->ErrCtxEl1Reg[8] = read_ctx_reg(get_gpregs_ctx(ctx),
CTX_GPREG_SP_EL0);
cpu_info->ErrCtxEl1Reg[9] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SP_EL1);
cpu_info->ErrCtxEl1Reg[10] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SPSR_EL1);
cpu_info->ErrCtxEl1Reg[9] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
sp_el1);
cpu_info->ErrCtxEl1Reg[10] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
spsr_el1);
#if (ERRATA_SPECULATIVE_AT)
cpu_info->ErrCtxEl1Reg[11] = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_TCR_EL1);
#else
cpu_info->ErrCtxEl1Reg[11] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TCR_EL1);
cpu_info->ErrCtxEl1Reg[11] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tcr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
cpu_info->ErrCtxEl1Reg[12] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TPIDR_EL0);
cpu_info->ErrCtxEl1Reg[13] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TPIDR_EL1);
cpu_info->ErrCtxEl1Reg[14] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TPIDRRO_EL0);
cpu_info->ErrCtxEl1Reg[15] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TTBR0_EL1);
cpu_info->ErrCtxEl1Reg[16] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TTBR1_EL1);
cpu_info->ErrCtxEl1Reg[12] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tpidr_el0);
cpu_info->ErrCtxEl1Reg[13] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tpidr_el1);
cpu_info->ErrCtxEl1Reg[14] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tpidrro_el0);
cpu_info->ErrCtxEl1Reg[15] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
ttbr0_el1);
cpu_info->ErrCtxEl1Reg[16] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
ttbr1_el1);
#if CTX_INCLUDE_EL2_REGS
cpu_info->ErrCtxEl2Reg[0] = read_el2_ctx_common(get_el2_sysregs_ctx(ctx),


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -142,7 +142,7 @@ int32_t tegra_fiq_get_intr_context(void)
val = read_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_SP_EL0));
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X2), (val));
val = read_ctx_reg((el1state_ctx), (uint32_t)(CTX_SP_EL1));
val = read_el1_ctx_common(el1state_ctx, sp_el1);
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X3), (val));
return 0;


@@ -356,10 +356,10 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
* will re-init this info from non-secure software when the
* core come online.
*/
actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
actlr_elx = read_el1_ctx_common((get_el1_sysregs_ctx(ctx)), actlr_el1);
actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
actlr_elx |= DENVER_CPU_PMSTATE_C1;
write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
write_el1_ctx_common((get_el1_sysregs_ctx(ctx)), actlr_el1, actlr_elx);
/*
* Check if we are exiting from deep sleep and restore SE


@@ -142,10 +142,10 @@ void qtiseclib_cb_get_ns_ctx(qtiseclib_dbg_a64_ctxt_regs_type *qti_ns_ctx)
qti_ns_ctx->elr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_ELR_EL3);
qti_ns_ctx->spsr_el1 =
read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SPSR_EL1);
read_el1_ctx_common(get_el1_sysregs_ctx(ctx), spsr_el1);
qti_ns_ctx->elr_el1 =
read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_ELR_EL1);
qti_ns_ctx->sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SP_EL1);
read_el1_ctx_common(get_el1_sysregs_ctx(ctx), elr_el1);
qti_ns_ctx->sp_el1 = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sp_el1);
qti_ns_ctx->x0 = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
qti_ns_ctx->x1 = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -160,9 +160,9 @@ static uint64_t trusty_fiq_handler(uint32_t id,
(void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
ctx->fiq_sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1);
ctx->fiq_sp_el1 = read_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1);
write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
write_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1, ctx->fiq_handler_sp);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, (uint32_t)ctx->fiq_handler_cpsr);
SMC_RET0(handle);
@@ -221,7 +221,7 @@ static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t
*/
(void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
ctx->fiq_handler_active = 0;
write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
write_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1, ctx->fiq_sp_el1);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
SMC_RET0(handle);


@@ -319,7 +319,7 @@ static void spmc_el0_sp_setup_mmu(struct secure_partition_desc *sp,
xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
EL1_EL0_REGIME);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), mair_el1,
mmu_cfg_params[MMU_CFG_MAIR]);
/* Store the initialised SCTLR_EL1 value in the cpu_context */
@@ -327,27 +327,27 @@
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_TCR_EL1, mmu_cfg_params[MMU_CFG_TCR]);
#else
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1,
mmu_cfg_params[MMU_CFG_TCR]);
#endif /* ERRATA_SPECULATIVE_AT */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), ttbr0_el1,
mmu_cfg_params[MMU_CFG_TTBR0]);
}
static void spmc_el0_sp_setup_sctlr_el1(cpu_context_t *ctx)
{
u_register_t sctlr_el1;
u_register_t sctlr_el1_val;
/* Setup SCTLR_EL1 */
#if (ERRATA_SPECULATIVE_AT)
sctlr_el1 = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
sctlr_el1_val = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1);
#else
sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
sctlr_el1_val = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
sctlr_el1 |=
sctlr_el1_val |=
/*SCTLR_EL1_RES1 |*/
/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
SCTLR_UCI_BIT |
@@ -368,7 +368,7 @@ static void spmc_el0_sp_setup_sctlr_el1(cpu_context_t *ctx)
/* Enable MMU. */
SCTLR_M_BIT;
sctlr_el1 &= ~(
sctlr_el1_val &= ~(
/* Explicit data accesses at EL0 are little-endian. */
SCTLR_E0E_BIT |
/*
@@ -383,9 +383,9 @@
/* Store the initialised SCTLR_EL1 value in the cpu_context */
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1);
CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1_val);
#else
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_el1_val);
#endif /* ERRATA_SPECULATIVE_AT */
}
@@ -400,10 +400,10 @@ static void spmc_el0_sp_setup_system_registers(struct secure_partition_desc *sp,
/* Setup other system registers. */
/* Shim Exception Vector Base Address */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), vbar_el1,
SPM_SHIM_EXCEPTIONS_PTR);
#if NS_TIMER_SWITCH
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
write_el1_ctx_arch_timer(get_el1_sysregs_ctx(ctx), cntkctl_el1,
EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
#endif
@@ -414,7 +414,7 @@ static void spmc_el0_sp_setup_system_registers(struct secure_partition_desc *sp,
* TTA: Enable access to trace registers.
* ZEN (v8.2): Trap SVE instructions and access to SVE registers.
*/
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), cpacr_el1,
CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
}


@@ -27,7 +27,7 @@
void spm_sp_setup(sp_context_t *sp_ctx)
{
cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
u_register_t sctlr_el1;
u_register_t sctlr_el1_val;
/* Pointer to the MP information from the platform port. */
const spm_mm_boot_info_t *sp_boot_info =
plat_get_secure_partition_boot_info(NULL);
@@ -122,7 +122,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
EL1_EL0_REGIME);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), mair_el1,
mmu_cfg_params[MMU_CFG_MAIR]);
/* Store the initialised SCTLR_EL1 value in the cpu_context */
@@ -130,22 +130,22 @@ void spm_sp_setup(sp_context_t *sp_ctx)
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_TCR_EL1, mmu_cfg_params[MMU_CFG_TCR]);
#else
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1,
mmu_cfg_params[MMU_CFG_TCR]);
#endif /* ERRATA_SPECULATIVE_AT */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), ttbr0_el1,
mmu_cfg_params[MMU_CFG_TTBR0]);
/* Setup SCTLR_EL1 */
#if (ERRATA_SPECULATIVE_AT)
sctlr_el1 = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
sctlr_el1_val = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1);
#else
sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
sctlr_el1_val = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
sctlr_el1 |=
sctlr_el1_val |=
/*SCTLR_EL1_RES1 |*/
/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
SCTLR_UCI_BIT |
@@ -167,7 +167,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
SCTLR_M_BIT
;
sctlr_el1 &= ~(
sctlr_el1_val &= ~(
/* Explicit data accesses at EL0 are little-endian. */
SCTLR_E0E_BIT |
/*
@@ -182,9 +182,9 @@ void spm_sp_setup(sp_context_t *sp_ctx)
/* Store the initialised SCTLR_EL1 value in the cpu_context */
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1);
CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1_val);
#else
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_el1_val);
#endif /* ERRATA_SPECULATIVE_AT */
/*
@ -193,10 +193,10 @@ void spm_sp_setup(sp_context_t *sp_ctx)
*/
/* Shim Exception Vector Base Address */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), vbar_el1,
SPM_SHIM_EXCEPTIONS_PTR);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
write_el1_ctx_arch_timer(get_el1_sysregs_ctx(ctx), cntkctl_el1,
EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
/*
@@ -206,7 +206,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
* TTA: Enable access to trace registers.
* ZEN (v8.2): Trap SVE instructions and access to SVE registers.
*/
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), cpacr_el1,
CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
/*