Mirror of https://github.com/ARM-software/arm-trusted-firmware.git (synced 2025-04-17 10:04:26 +00:00)

On the ARMv8 architecture, cache maintenance operations by set/way on the last level of integrated cache do not affect the system cache. This means that such a flush or clean operation could result in the data being pushed out to the system cache rather than main memory. Another CPU could access this data before it enables its data cache or MMU. Such accesses could be serviced from main memory instead of the system cache. If the data in the system cache has not yet been flushed or evicted to main memory, then there could be a loss of coherency. The only mechanism to guarantee that main memory will be updated is to use cache maintenance operations to the PoC by MVA (see section D3.4.11 "System level caches" of the ARMv8-A Reference Manual, Issue A.g / ARM DDI 0487A.G).

This patch removes the reliance of Trusted Firmware on the flush by set/way operation to ensure visibility of data in main memory. Cache maintenance operations by MVA are now used instead. The following are the broad categories of changes:

1. The RW areas of BL2/BL31/BL32 are invalidated by MVA before the C runtime is initialised. This ensures that any stale cache lines at any level of cache are removed.

2. Updates to global data in runtime firmware (BL31) by the primary CPU are made visible to the secondary CPUs using a cache clean operation by MVA.

3. Cache maintenance by set/way operations are only used prior to power down.

NOTE: NON-UPSTREAM TRUSTED FIRMWARE CODE SHOULD MAKE EQUIVALENT CHANGES IN ORDER TO FUNCTION CORRECTLY ON PLATFORMS WITH SUPPORT FOR SYSTEM CACHES.

Fixes ARM-software/tf-issues#205

Change-Id: I64f1b398de0432813a0e0881d70f8337681f6e9a
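To illustrate how the MVA-based helpers defined in the file below are intended to be driven from C for points 1 and 2 above, here is a minimal usage sketch. The C prototypes, the __RW_START__/__RW_END__ linker symbols and the shared_flag variable are assumptions made for this example, not part of the patch or of this file:

/*
 * Minimal usage sketch only. The prototypes, linker symbols and the
 * shared_flag variable are assumed for illustration; consult the actual
 * Trusted Firmware headers and linker scripts for the real definitions.
 */
#include <stdint.h>

/* Assumed C prototypes for the assembly helpers defined in this file. */
void flush_dcache_range(uint64_t addr, uint64_t size);	/* dc civac loop */
void clean_dcache_range(uint64_t addr, uint64_t size);	/* dc cvac loop  */
void inv_dcache_range(uint64_t addr, uint64_t size);	/* dc ivac loop  */

/* Hypothetical linker symbols bounding the image's RW data. */
extern char __RW_START__[], __RW_END__[];

/* Hypothetical global written by the primary CPU, read by secondaries. */
static volatile uint64_t shared_flag;

void example_cache_maintenance(void)
{
	/*
	 * 1. Invalidate the RW area by MVA so that stale lines at any
	 *    cache level (including a system cache) are discarded.
	 */
	inv_dcache_range((uint64_t)__RW_START__,
			 (uint64_t)(__RW_END__ - __RW_START__));

	/*
	 * 2. After updating global data, clean it by MVA to the PoC so
	 *    that a secondary CPU still running with its caches disabled
	 *    reads the up-to-date value from main memory.
	 */
	shared_flag = 1;
	clean_dcache_range((uint64_t)&shared_flag, sizeof(shared_flag));
}

The set/way helpers further down (dcsw_op_all, dcsw_op_louis) remain appropriate only on the power-down path, where a CPU cleans its own caches and no other agent depends on the system cache having been updated.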
224 lines · 6.9 KiB · ArmAsm
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	flush_dcache_range
	.globl	clean_dcache_range
	.globl	inv_dcache_range
	.globl	dcsw_op_louis
	.globl	dcsw_op_all
	.globl	dcsw_op_level1
	.globl	dcsw_op_level2
	.globl	dcsw_op_level3

/*
 * This macro can be used for implementing various data cache operations `op`
 */
	.macro do_dcache_maintenance_by_mva op
	dcache_line_size x2, x3		// x2 = cache line size in bytes
	add	x1, x0, x1		// x1 = end of the region (addr + size)
	sub	x3, x2, #1
	bic	x0, x0, x3		// align the start address to a line boundary
loop_\op:
	dc	\op, x0			// operate on one line by MVA
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	loop_\op
	dsb	sy			// ensure completion of the maintenance operations
	ret
	.endm

/* ------------------------------------------
 * Clean+Invalidate from the base address
 * for 'size' bytes. 'x0' = addr, 'x1' = size
 * ------------------------------------------
 */
func flush_dcache_range
	do_dcache_maintenance_by_mva civac
endfunc flush_dcache_range

/* ------------------------------------------
 * Clean from the base address for 'size'
 * bytes. 'x0' = addr, 'x1' = size
 * ------------------------------------------
 */
func clean_dcache_range
	do_dcache_maintenance_by_mva cvac
endfunc clean_dcache_range

/* ------------------------------------------
 * Invalidate from the base address for
 * 'size' bytes. 'x0' = addr, 'x1' = size
 * ------------------------------------------
 */
func inv_dcache_range
	do_dcache_maintenance_by_mva ivac
endfunc inv_dcache_range


/* ---------------------------------------------------------------
 * Data cache operations by set/way to the level specified
 *
 * The main function, do_dcsw_op requires:
 * x0: The operation type (0-2), as defined in arch.h
 * x3: The last cache level to operate on
 * x9: clidr_el1
 * x10: The cache level to begin operation from
 * and will carry out the operation on each data cache, from the
 * level in x10 up to the level in x3, in sequence
 *
 * The dcsw_op macro sets up the x3 and x9 parameters based on
 * clidr_el1 cache information before invoking the main function
 * ---------------------------------------------------------------
 */

	.macro	dcsw_op shift, fw, ls
	mrs	x9, clidr_el1
	ubfx	x3, x9, \shift, \fw
	lsl	x3, x3, \ls
	mov	x10, xzr
	b	do_dcsw_op
	.endm

func do_dcsw_op
	cbz	x3, exit
	adr	x14, dcsw_loop_table	// compute inner loop address
	add	x14, x14, x0, lsl #5	// inner loop is 8x32-bit instructions
	mov	x0, x9
	mov	w8, #1
loop1:
	add	x2, x10, x10, lsr #1	// work out 3x current cache level
	lsr	x1, x0, x2		// extract cache type bits from clidr
	and	x1, x1, #7		// mask the bits for current cache only
	cmp	x1, #2			// see what cache we have at this level
	b.lt	level_done		// nothing to do if no cache or icache

	msr	csselr_el1, x10		// select current cache level in csselr
	isb				// isb to sync the new csselr & ccsidr
	mrs	x1, ccsidr_el1		// read the new ccsidr
	and	x2, x1, #7		// extract the length of the cache lines
	add	x2, x2, #4		// add 4 (line length offset)
	ubfx	x4, x1, #3, #10		// maximum way number
	clz	w5, w4			// bit position of way size increment
	lsl	w9, w4, w5		// w9 = aligned max way number
	lsl	w16, w8, w5		// w16 = way number loop decrement
	orr	w9, w10, w9		// w9 = combine way and cache number
	ubfx	w6, w1, #13, #15	// w6 = max set number
	lsl	w17, w8, w2		// w17 = set number loop decrement
	dsb	sy			// barrier before we start this level
	br	x14			// jump to DC operation specific loop

	.macro	dcsw_loop _op
loop2_\_op:
	lsl	w7, w6, w2		// w7 = aligned max set number

loop3_\_op:
	orr	w11, w9, w7		// combine cache, way and set number
	dc	\_op, x11
	subs	w7, w7, w17		// decrement set number
	b.ge	loop3_\_op

	subs	x9, x9, x16		// decrement way number
	b.ge	loop2_\_op

	b	level_done
	.endm

level_done:
	add	x10, x10, #2		// increment cache number
	cmp	x3, x10
	b.gt	loop1
	msr	csselr_el1, xzr		// select cache level 0 in csselr
	dsb	sy			// barrier to complete final cache operation
	isb
exit:
	ret
endfunc do_dcsw_op

dcsw_loop_table:
	dcsw_loop	isw
	dcsw_loop	cisw
	dcsw_loop	csw


func dcsw_op_louis
	dcsw_op	#LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
endfunc dcsw_op_louis


func dcsw_op_all
	dcsw_op	#LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
endfunc dcsw_op_all

/* ---------------------------------------------------------------
 * Helper macro for data cache operations by set/way for the
 * level specified
 * ---------------------------------------------------------------
 */
	.macro	dcsw_op_level level
	mrs	x9, clidr_el1
	mov	x3, \level
	sub	x10, x3, #2
	b	do_dcsw_op
	.endm

/* ---------------------------------------------------------------
 * Data cache operations by set/way for level 1 cache
 *
 * The main function, do_dcsw_op requires:
 * x0: The operation type (0-2), as defined in arch.h
 * ---------------------------------------------------------------
 */
func dcsw_op_level1
	dcsw_op_level #(1 << LEVEL_SHIFT)
endfunc dcsw_op_level1

/* ---------------------------------------------------------------
 * Data cache operations by set/way for level 2 cache
 *
 * The main function, do_dcsw_op requires:
 * x0: The operation type (0-2), as defined in arch.h
 * ---------------------------------------------------------------
 */
func dcsw_op_level2
	dcsw_op_level #(2 << LEVEL_SHIFT)
endfunc dcsw_op_level2

/* ---------------------------------------------------------------
 * Data cache operations by set/way for level 3 cache
 *
 * The main function, do_dcsw_op requires:
 * x0: The operation type (0-2), as defined in arch.h
 * ---------------------------------------------------------------
 */
func dcsw_op_level3
	dcsw_op_level #(3 << LEVEL_SHIFT)
endfunc dcsw_op_level3