diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c
index 2f9185da039..8046fd19945 100644
--- a/core/arch/arm/kernel/boot.c
+++ b/core/arch/arm/kernel/boot.c
@@ -927,12 +927,7 @@ static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
 #endif
 	core_mmu_save_mem_map();
 
-	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
-		IMSG("Initializing virtualization support");
-		core_mmu_init_virtualization();
-	} else {
-		core_mmu_init_phys_mem();
-	}
+	core_mmu_init_phys_mem();
 	boot_mem_release_unused();
 	IMSG_RAW("\n");
 
diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h
index cdbc0fedefb..5db42eedf44 100644
--- a/core/include/mm/core_mmu.h
+++ b/core/include/mm/core_mmu.h
@@ -640,8 +640,6 @@ void core_mmu_set_default_prtn(void);
 void core_mmu_set_default_prtn_tbl(void);
 #endif
 
-void core_mmu_init_virtualization(void);
-
 /* Initialize physical memory pool */
 void core_mmu_init_phys_mem(void);
 
diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c
index 6787a8ac07c..9a1b6c530db 100644
--- a/core/mm/core_mmu.c
+++ b/core/mm/core_mmu.c
@@ -2581,20 +2581,6 @@ bool is_nexus(const void *va)
 }
 #endif
 
-void core_mmu_init_virtualization(void)
-{
-	paddr_t b1 = 0;
-	paddr_size_t s1 = 0;
-
-	static_assert(ARRAY_SIZE(secure_only) <= 2);
-	if (ARRAY_SIZE(secure_only) == 2) {
-		b1 = secure_only[1].paddr;
-		s1 = secure_only[1].size;
-	}
-	virt_init_memory(&static_memory_map, secure_only[0].paddr,
-			 secure_only[0].size, b1, s1);
-}
-
 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
 {
 	assert(p->pa);
@@ -2675,10 +2661,22 @@ static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa)
 
 void core_mmu_init_phys_mem(void)
 {
-	paddr_t ps = 0;
-	size_t size = 0;
+	/*
+	 * Get virtual addr/size of RAM where TAs are loaded/executed and
+	 * NSec shared mem allocated from teecore.
+	 */
+	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
+		paddr_t b1 = 0;
+		paddr_size_t s1 = 0;
 
-	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
+		static_assert(ARRAY_SIZE(secure_only) <= 2);
+		if (ARRAY_SIZE(secure_only) == 2) {
+			b1 = secure_only[1].paddr;
+			s1 = secure_only[1].size;
+		}
+		virt_init_memory(&static_memory_map, secure_only[0].paddr,
+				 secure_only[0].size, b1, s1);
+	} else {
 #ifdef CFG_WITH_PAGER
 		/*
 		 * The pager uses all core memory so there's no need to add
@@ -2689,6 +2687,8 @@ void core_mmu_init_phys_mem(void)
 #else /*!CFG_WITH_PAGER*/
 		size_t align = BIT(CORE_MMU_USER_CODE_SHIFT);
 		paddr_t end_pa = 0;
+		size_t size = 0;
+		paddr_t ps = 0;
 		paddr_t pa = 0;
 
 		static_assert(ARRAY_SIZE(secure_only) <= 2);
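
For reference, the consolidated function then reads roughly as below. This is only a sketch assembled from the hunks above: the CFG_WITH_PAGER branch and the remainder of the non-virtualization path are not visible in this diff and are represented by placeholder comments.

void core_mmu_init_phys_mem(void)
{
	/*
	 * Get virtual addr/size of RAM where TAs are loaded/executed and
	 * NSec shared mem allocated from teecore.
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		paddr_t b1 = 0;
		paddr_size_t s1 = 0;

		/* At most two secure-only ranges are supported. */
		static_assert(ARRAY_SIZE(secure_only) <= 2);
		if (ARRAY_SIZE(secure_only) == 2) {
			b1 = secure_only[1].paddr;
			s1 = secure_only[1].size;
		}
		virt_init_memory(&static_memory_map, secure_only[0].paddr,
				 secure_only[0].size, b1, s1);
	} else {
#ifdef CFG_WITH_PAGER
		/* Pager case: all core memory is already in use (elided). */
#else /*!CFG_WITH_PAGER*/
		/* Non-pager case: register physical memory pool (elided). */
#endif
	}
}

With this shape, init_primary() in boot.c no longer needs its own CFG_NS_VIRTUALIZATION branch and simply calls core_mmu_init_phys_mem(), which is what the first hunk changes.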