arm-trusted-firmware/lib/xlat_tables/aarch64/xlat_tables.c
Soby Mathew 4c0d039076 Rework type usage in Trusted Firmware
This patch reworks type usage in generic code, drivers and ARM platform files
to make it more portable. The major changes done with respect to
type usage are as listed below:

* Use uintptr_t for storing addresses instead of uint64_t or unsigned long.
* Review usage of unsigned long, as it can no longer be assumed to be 64-bit.
* Use u_register_t for register values whose width varies depending on
  whether the target is AArch64 or AArch32.
* Use generic C types wherever possible (a brief sketch of these changes
  follows this list).
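
For example, a minimal before/after sketch of the kind of change this patch
applies (the variable names below are illustrative only, not taken from the
patch itself):

    /* Before: width assumptions that only hold on AArch64. */
    uint64_t load_addr = 0x80000000ULL;   /* address forced to 64 bits */
    unsigned long flags = 0UL;            /* 32 bits on AArch32 toolchains */

    /* After: portable across AArch64 and AArch32. */
    uintptr_t load_va = 0x80000000U;      /* pointer-width address */
    u_register_t reg_flags = 0U;          /* native register width */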

In addition to the above changes, this patch also modifies format specifiers
in print invocations so that they are AArch64/AArch32 agnostic. Only files
related to upcoming feature development have been reworked.

Change-Id: I9f8c78347c5a52ba7027ff389791f1dad63ee5f8
2016-07-18 17:52:15 +01:00


/*
 * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * The virtual address space size must be a power of two (as set in TCR.T0SZ).
 * As we start the initial lookup at level 1, it must also be between 2 GB and
 * 512 GB (with the virtual address size therefore 31 to 39 bits). See section
 * D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.i) for more
 * information.
 */
CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 39) &&
	IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);

#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
			__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
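
/*
 * Worked example (illustrative, not part of the original source): with
 * ADDR_SPACE_SIZE set to 1ULL << 32 (4 GB) and each level-1 entry mapping
 * 1 GB (assuming L1_XLAT_ADDRESS_SHIFT == 30, the 4 KB granule case),
 * NUM_L1_ENTRIES is 4, so the table above occupies 32 bytes and the
 * __aligned() attribute aligns it to its own size, as required for the
 * base address programmed into TTBR0.
 */
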
static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0);

	/* 48 bits address */
	if (max_addr & ADDR_MASK_44_TO_47)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if (max_addr & ADDR_MASK_42_TO_43)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if (max_addr & ADDR_MASK_40_TO_41)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if (max_addr & ADDR_MASK_36_TO_39)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if (max_addr & ADDR_MASK_32_TO_35)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
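
/*
 * Worked example (illustrative, not part of the original source): if the
 * highest mapped physical address is 0x2000000000 (bit 37 set), only the
 * ADDR_MASK_36_TO_39 test matches, so the function returns TCR_PS_BITS_1TB
 * and a 40-bit physical address size is advertised via the TCR PS/IPS field.
 */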

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	print_mmap();
	init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa);
	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
	assert(max_va < ADDR_SPACE_SIZE);
}
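
/*
 * Typical call sequence (an illustrative sketch, not part of this file; the
 * region values are made up):
 *
 *	mmap_add_region(0x80000000ULL, 0x80000000, 0x100000,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0);
 */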

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Inner & outer WBWA & shareable, with T0SZ */		\
		/* derived from the configured ADDR_SPACE_SIZE */	\
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
			TCR_RGN_INNER_WBA |				\
			(64 - __builtin_ctzl(ADDR_SPACE_SIZE));		\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) l1_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsb();							\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if (flags & DISABLE_DCACHE)				\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
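
/*
 * For reference (illustrative, not in the original file): the second
 * instantiation above expands to a function with the signature
 *
 *	void enable_mmu_el3(unsigned int flags);
 *
 * which a BL image calls once init_xlat_tables() has built the tables;
 * passing DISABLE_DCACHE in flags enables the MMU with the data cache
 * still disabled.
 */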