feat(drtm): flush dcache before DLME launch

Flush the data cache range before DLME launch to ensure that the data
passed by the DCE Preamble is committed to memory.

Signed-off-by: Manish Pandey <manish.pandey2@arm.com>
Change-Id: I9946fd3420a17b86d9f1483e8b2cd5880033454e
Manish Pandey 2022-07-21 13:07:07 +01:00 committed by Manish V Badarkhe
parent 2c265975a7
commit 67471e75b3
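
For reference while reading the diff below, the helpers this change relies on are declared in TF-A roughly as follows (reproduced here only as a reading aid, assuming the prototypes from xlat_tables_v2.h and the AArch64 cache helpers; they are not part of the change):

/* Clean and invalidate the data cache by VA over [addr, addr + size). */
void flush_dcache_range(uintptr_t addr, size_t size);

/* Map a physical range at a dynamically allocated VA with the given attributes. */
int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
				     uintptr_t *base_va, size_t size,
				     unsigned int attr);

/* Tear the temporary mapping down again. */
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);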


@@ -322,8 +322,8 @@ static enum drtm_retc drtm_dl_check_args(uint64_t x1,
 	uint64_t dlme_start, dlme_end;
 	uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
 	uint64_t dlme_data_start, dlme_data_end;
-	uintptr_t args_mapping;
-	size_t args_mapping_size;
+	uintptr_t va_mapping;
+	size_t va_mapping_size;
 	struct_drtm_dl_args *a;
 	struct_drtm_dl_args args_buf;
 	int rc;
@@ -334,16 +334,16 @@ static enum drtm_retc drtm_dl_check_args(uint64_t x1,
 		return INVALID_PARAMETERS;
 	}
 
-	args_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);
+	va_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);
 
 	/* check DRTM parameters are within NS address region */
-	rc = plat_drtm_validate_ns_region(x1, args_mapping_size);
+	rc = plat_drtm_validate_ns_region(x1, va_mapping_size);
 	if (rc != 0) {
 		ERROR("DRTM: parameters lies within secure memory\n");
 		return INVALID_PARAMETERS;
 	}
 
-	rc = mmap_add_dynamic_region_alloc_va(x1, &args_mapping, args_mapping_size,
+	rc = mmap_add_dynamic_region_alloc_va(x1, &va_mapping, va_mapping_size,
 					      MT_MEMORY | MT_NS | MT_RO |
 					      MT_SHAREABILITY_ISH);
 	if (rc != 0) {
@@ -351,15 +351,14 @@ static enum drtm_retc drtm_dl_check_args(uint64_t x1,
 		      __func__, rc);
 		return INTERNAL_ERROR;
 	}
-	a = (struct_drtm_dl_args *)args_mapping;
-	/*
-	 * TODO: invalidate all data cache before reading the data passed by the
-	 * DCE Preamble. This is required to avoid / defend against racing with
-	 * cache evictions.
-	 */
+	a = (struct_drtm_dl_args *)va_mapping;
+
+	/* Sanitize cache of data passed in args by the DCE Preamble. */
+	flush_dcache_range(va_mapping, va_mapping_size);
+
 	args_buf = *a;
 
-	rc = mmap_remove_dynamic_region(args_mapping, args_mapping_size);
+	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
 	if (rc) {
 		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
 		      " rc=%d\n", __func__, rc);
@@ -458,6 +457,28 @@ static enum drtm_retc drtm_dl_check_args(uint64_t x1,
 		}
 	}
 
+	/*
+	 * Map and sanitize the cache of data range passed by DCE Preamble. This
+	 * is required to avoid / defend against racing with cache evictions
+	 */
+	va_mapping_size = ALIGNED_UP((dlme_end - dlme_start), DRTM_PAGE_SIZE);
+	rc = mmap_add_dynamic_region_alloc_va(dlme_img_start, &va_mapping, va_mapping_size,
+					      MT_MEMORY | MT_NS | MT_RO |
+					      MT_SHAREABILITY_ISH);
+	if (rc != 0) {
+		ERROR("DRTM: %s: mmap_add_dynamic_region_alloc_va() failed rc=%d\n",
+		      __func__, rc);
+		return INTERNAL_ERROR;
+	}
+
+	flush_dcache_range(va_mapping, va_mapping_size);
+
+	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
+	if (rc) {
+		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
+		      " rc=%d\n", __func__, rc);
+		panic();
+	}
 	*a_out = *a;
 	return SUCCESS;
 }
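
The final hunk repeats the same map, flush, unmap sequence for the DLME data range, only without the struct copy. With a length-parameterised variant of the sketch above (a hypothetical drtm_flush_ns_range(pa, len) that stops after the flush), that call site would reduce to roughly:

/* Hypothetical usage; the commit open-codes this sequence instead. */
enum drtm_retc ret = drtm_flush_ns_range(dlme_img_start,
					 dlme_end - dlme_start);
if (ret != SUCCESS) {
	return ret;
}

Either way the effect is the one described in the new comment: the range written by the DCE Preamble is mapped non-secure read-only, cleaned and invalidated by VA, and unmapped again before the checked arguments are returned to the caller.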