diff --git a/core/arch/arm/include/mm/core_mmu_arch.h b/core/arch/arm/include/mm/core_mmu_arch.h
index 803c9c42e..3d1dd106b 100644
--- a/core/arch/arm/include/mm/core_mmu_arch.h
+++ b/core/arch/arm/include/mm/core_mmu_arch.h
@@ -163,6 +163,9 @@ static inline TEE_Result cache_op_outer(enum cache_op op __unused,
 }
 #endif
 
+/* Do section mapping, not supported with LPAE */
+void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb);
+
 #if defined(ARM64)
 unsigned int core_mmu_arm64_get_pa_width(void);
 #endif
diff --git a/core/arch/arm/kernel/entry_a32.S b/core/arch/arm/kernel/entry_a32.S
index 4e9030987..9e6edcbea 100644
--- a/core/arch/arm/kernel/entry_a32.S
+++ b/core/arch/arm/kernel/entry_a32.S
@@ -192,6 +192,50 @@ END_FUNC reset_vect_table
 #endif
 	.endm
 
+/*
+ * Uses r0-r5. r4-r5 cannot be pushed on the stack because
+ * their values would be lost during the invalidation.
+ */
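+/*
+ * DCISW takes the cache level in bits [3:1], the set index starting at
+ * bit log2(linelen) and the way index in the top bits. As a worked
+ * example, assuming a hypothetical 4-way, 32KB L1 D-cache with 64-byte
+ * lines:
+ *   linelen = 64  -> log2(linelen) = 6, so r3 = 1 << 6 = 64
+ *   4 ways        -> max way number = 3, clz(3) = 30, ways in bits [31:30]
+ *   32KB/(4 * 64) -> 128 sets, set index in bits [12:6]
+ * The nested loops below then issue one DCISW per set/way combination.
+ */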
+LOCAL_FUNC arm_cl1_d_invbysetway , :
+	mov		r0, #0
+	/* Select the D$ level */
+	lsl		r0, r0, #1
+	/* Write CSSELR to select the data cache */
+	mcr		p15, 2, r0, c0, c0, 0
+	isb
+	/* Read the Cache info */
+	mrc		p15, 1, r1, c0, c0, 0
+	/* r2 = max way number */
+	ubfx		r2, r1, #3, #10
+	/* r4 = 32 - log2(ways), bit position of way index */
+	clz		r4, r2
+	and		r3, r1, #7		/* r3 = log2(linelen) - 4 */
+	add		r3, r3, #4		/* r3 = log2(linelen) */
+	ubfx		r1, r1, #13, #15	/* r1 = max set number */
+	/* r1 = max set number at correct position */
+	lsl		r1, r1, r3
+	mov		r0, #1
+	lsl		r3, r0, r3	/* r3 = set index decrement value */
+
+cl1_dcisw_loop_way:
+	/* Shift way number to position */
+	lsl		r2, r2, r4
+	/* Set Max Set Number in r5 */
+	mov		r5, r1
+	/* Combine the set/way and cache level */
+cl1_dcisw_loop:
+	orr		r0, r2, r5
+	/* DCISW Invalidate data line set/way */
+	mcr		p15, 0, r0, c7, c6, 2
+	subs		r5, r5, r3		/* decrement set number */
+	bge		cl1_dcisw_loop
+	lsr		r2, r2, r4
+	subs		r2, r2, #1		/* decrement way number */
+	bge		cl1_dcisw_loop_way
+	dsb
+	mov		pc, lr
+END_FUNC arm_cl1_d_invbysetway
+
 FUNC _start , :
 UNWIND(	.cantunwind)
 	/*
@@ -867,8 +911,7 @@ UNWIND(	.cantunwind)
 
 #if defined (CFG_BOOT_SECONDARY_REQUEST)
 	/* if L1 is not invalidated before, do it here */
-	mov	r0, #DCACHE_OP_INV
-	bl	dcache_op_level1
+	bl	arm_cl1_d_invbysetway
 #endif
 
 	bl	__get_core_pos
diff --git a/core/arch/arm/mm/core_mmu_lpae.c b/core/arch/arm/mm/core_mmu_lpae.c
index e83a5e092..dd896cd56 100644
--- a/core/arch/arm/mm/core_mmu_lpae.c
+++ b/core/arch/arm/mm/core_mmu_lpae.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: (BSD-2-Clause AND BSD-3-Clause)
 /*
  * Copyright (c) 2015-2016, 2022 Linaro Limited
+ * Copyright 2021 NXP
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -626,6 +627,14 @@ static void core_init_mmu_prtn_tee(struct mmu_partition *prtn,
 
 	/* Clear table before use */
 	memset(prtn->base_tables, 0, sizeof(base_xlation_table));
+#ifdef CFG_COCKPIT
+	/* Clear L2 table before use */
+	memset(prtn->xlat_tables, 0, XLAT_TABLES_SIZE);
+	/* Clear L2 table for TA before use */
+	memset(prtn->l2_ta_tables, 0, XLAT_TABLES_SIZE);
+	prtn->asid = 0;
+	prtn->xlat_tables_used = 0;
+#endif
 
 	for (n = 0; n < mem_map->count; n++)
 		if (!core_mmu_is_dynamic_vaspace(mem_map->map + n))
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c
index 8d4accaa0..dafe30ee7 100644
--- a/core/arch/arm/mm/core_mmu_v7.c
+++ b/core/arch/arm/mm/core_mmu_v7.c
@@ -698,6 +698,45 @@ bool __noprof core_mmu_user_va_range_is_defined(void)
 	return true;
 }
 
+static void print_mmap_area(const struct tee_mmap_region *mm __maybe_unused,
+			    const char *str __maybe_unused)
+{
+	if (!(mm->attr & TEE_MATTR_VALID_BLOCK))
+		debug_print("%s [%08" PRIxVA " %08" PRIxVA "] not mapped",
+			    str, mm->va, mm->va + mm->size);
+	else
+		debug_print("%s [%08" PRIxVA " %08" PRIxVA "] %s-%s-%s-%s",
+			    str, mm->va, mm->va + mm->size,
+			    mattr_is_cached(mm->attr) ? "MEM" : "DEV",
+			    mm->attr & TEE_MATTR_PW ? "RW" : "RO",
+			    mm->attr & TEE_MATTR_PX ? "X" : "XN",
+			    mm->attr & TEE_MATTR_SECURE ? "S" : "NS");
+}
+
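+/*
+ * Fill the level 1 translation table @ttb with 1MB section descriptors
+ * covering the region described by @mm. This only exists for the
+ * short-descriptor format, hence the "not supported with LPAE" note on
+ * the declaration in core_mmu_arch.h.
+ */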
+void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb)
+{
+	uint32_t attr = mattr_to_desc(1, mm->attr);
+	size_t idx = mm->va >> SECTION_SHIFT;
+	paddr_t pa = 0;
+	size_t n;
+
+	print_mmap_area(mm, "section map");
+
+	if (attr != INVALID_DESC)
+		pa = mm->pa;
+
+	n = ROUNDUP(mm->size, SECTION_SIZE) >> SECTION_SHIFT;
+	DMSG("n = %zu, attr = %#" PRIx32 ", pa = %#" PRIxPA, n, attr, pa);
+
+	while (n--) {
+		assert(!attr || !ttb[idx] || ttb[idx] == (pa | attr));
+
+		ttb[idx] = pa | attr;
+		idx++;
+		pa += SECTION_SIZE;
+	}
+}
+
 void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map)
 {
 	void *ttb1 = (void *)core_mmu_get_main_ttb_va(prtn);
diff --git a/core/arch/arm/plat-imx/a9_plat_init.S b/core/arch/arm/plat-imx/a9_plat_init.S
index 4050684c8..08c45d30f 100644
--- a/core/arch/arm/plat-imx/a9_plat_init.S
+++ b/core/arch/arm/plat-imx/a9_plat_init.S
@@ -3,7 +3,7 @@
  * Copyright (c) 2014, STMicroelectronics International N.V.
  * Copyright (c) 2016, Wind River Systems.
  * All rights reserved.
- * Copyright 2019 NXP
+ * Copyright 2019-2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -35,10 +35,10 @@
  * and with ARM registers R0, R1, R2, R3 being scratchable.
  */
 
-#include <arm32.h>
 #include <arm32_macros.S>
 #include <arm32_macros_cortex_a9.S>
 #include <asm.S>
+#include <arm.h>
 #include <kernel/tz_ssvce_def.h>
 #include <platform_config.h>
 
@@ -109,7 +109,7 @@ FUNC plat_cpu_reset_early , :
 	 * - L2 Prefetch hint enable (bit1=1)
 	 *
 	 * NSACR = 0x00020C00
-	 * - NSec cannot change ACTRL.SMP (NS_SMP bit18=0)
+ * - NSec can change ACTRL.SMP (NS_SMP bit18=1)
 	 * - Nsec can lockdown TLB (TL bit17=1)
 	 * - NSec cannot access PLE (PLE bit16=0)
 	 * - NSec can use SIMD/VFP (CP10/CP11) (bit15:14=2b00, bit11:10=2b11)
@@ -127,7 +127,7 @@ FUNC plat_cpu_reset_early , :
 #endif
 	write_actlr r0
 
-	mov_imm r0, 0x00020C00
+	mov_imm r0, 0x00060C00
 	write_nsacr r0
 
 	read_pcr r0
diff --git a/core/arch/arm/plat-imx/conf.mk b/core/arch/arm/plat-imx/conf.mk
index d43c17069..d1fc2882e 100644
--- a/core/arch/arm/plat-imx/conf.mk
+++ b/core/arch/arm/plat-imx/conf.mk
@@ -75,6 +75,8 @@ mx8mp-flavorlist = \
 
 mx8qm-flavorlist = \
 	mx8qmmek \
+	mx8qmmekcockpita53 \
+	mx8qmmekcockpita72 \
 
 mx8qx-flavorlist = \
 	mx8qxpmek \
@@ -101,6 +103,7 @@ $(call force,CFG_MX6UL,y)
 $(call force,CFG_TEE_CORE_NB_CORE,1)
 $(call force,CFG_TZC380,y)
 include core/arch/arm/cpu/cortex-a7.mk
+CFG_BUSFREQ ?= y
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx6ull-flavorlist)))
 $(call force,CFG_MX6,y)
 $(call force,CFG_MX6ULL,y)
@@ -115,10 +118,12 @@ $(call force,CFG_MX6,y)
 $(call force,CFG_MX6Q,y)
 $(call force,CFG_TEE_CORE_NB_CORE,4)
 $(call force,CFG_TZC380,y)
+CFG_BUSFREQ ?= y
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx6qp-flavorlist)))
 $(call force,CFG_MX6,y)
 $(call force,CFG_MX6QP,y)
 $(call force,CFG_TEE_CORE_NB_CORE,4)
+CFG_BUSFREQ ?= y
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx6d-flavorlist)))
 $(call force,CFG_MX6,y)
 $(call force,CFG_MX6D,y)
@@ -129,6 +134,7 @@ $(call force,CFG_MX6,y)
 $(call force,CFG_MX6DL,y)
 $(call force,CFG_TEE_CORE_NB_CORE,2)
 $(call force,CFG_TZC380,y)
+CFG_BUSFREQ ?= y
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx6s-flavorlist)))
 $(call force,CFG_MX6,y)
 $(call force,CFG_MX6S,y)
@@ -152,13 +158,16 @@ else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx6sx-flavorlist)))
 $(call force,CFG_MX6,y)
 $(call force,CFG_MX6SX,y)
 $(call force,CFG_TEE_CORE_NB_CORE,1)
+CFG_BUSFREQ ?= y
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx7s-flavorlist)))
 $(call force,CFG_MX7,y)
 $(call force,CFG_TEE_CORE_NB_CORE,1)
 include core/arch/arm/cpu/cortex-a7.mk
+CFG_BUSFREQ ?= y
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx7d-flavorlist)))
 $(call force,CFG_MX7,y)
 $(call force,CFG_TEE_CORE_NB_CORE,2)
+CFG_BUSFREQ ?= y
 include core/arch/arm/cpu/cortex-a7.mk
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx7ulp-flavorlist)))
 $(call force,CFG_MX7ULP,y)
@@ -202,6 +211,8 @@ CFG_IMX_LPUART ?= y
 CFG_DRAM_BASE ?= 0x80000000
 CFG_TEE_CORE_NB_CORE ?= 6
 $(call force,CFG_IMX_OCOTP,n)
+$(call force,CFG_SC_IPC_BASE,SC_IPC0_BASE)
+$(call force,CFG_TZC380,n)
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx8qx-flavorlist)))
 $(call force,CFG_MX8QX,y)
 $(call force,CFG_ARM64_core,y)
@@ -210,6 +221,8 @@ CFG_IMX_LPUART ?= y
 CFG_DRAM_BASE ?= 0x80000000
 CFG_TEE_CORE_NB_CORE ?= 4
 $(call force,CFG_IMX_OCOTP,n)
+$(call force,CFG_SC_IPC_BASE,SC_IPC0_BASE)
+$(call force,CFG_TZC380,n)
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx8dxl-flavorlist)))
 $(call force,CFG_MX8DXL,y)
 $(call force,CFG_ARM64_core,y)
@@ -218,6 +231,8 @@ CFG_IMX_LPUART ?= y
 CFG_DRAM_BASE ?= 0x80000000
 $(call force,CFG_TEE_CORE_NB_CORE,2)
 $(call force,CFG_IMX_OCOTP,n)
+$(call force,CFG_SC_IPC_BASE,SC_IPC0_BASE)
+$(call force,CFG_TZC380,n)
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx8ulp-flavorlist)))
 $(call force,CFG_MX8ULP,y)
 $(call force,CFG_ARM64_core,y)
@@ -226,6 +241,8 @@ CFG_DRAM_BASE ?= 0x80000000
 CFG_TEE_CORE_NB_CORE ?= 2
 $(call force,CFG_NXP_SNVS,n)
 $(call force,CFG_IMX_OCOTP,n)
+$(call force,CFG_SC_IPC_BASE,SC_IPC0_BASE)
+$(call force,CFG_TZC380,n)
 CFG_IMX_MU ?= y
 CFG_IMX_ELE ?= n
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx93-flavorlist)))
@@ -237,10 +254,11 @@ CFG_TEE_CORE_NB_CORE ?= 2
 $(call force,CFG_NXP_SNVS,n)
 $(call force,CFG_IMX_OCOTP,n)
 $(call force,CFG_TZC380,n)
-$(call force,CFG_CRYPTO_DRIVER,n)
 $(call force,CFG_NXP_CAAM,n)
 CFG_IMX_MU ?= y
-CFG_IMX_ELE ?= n
+CFG_IMX_ELE ?= y
+CFG_IN_TREE_EARLY_TAS += trusted_keys/f04a0fe7-1f5d-4b9b-abf7-619b85b4ce8c
+CFG_IMX_ELE_TEST_PTA ?= n
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx95-flavorlist)))
 $(call force,CFG_MX95,y)
 $(call force,CFG_ARM64_core,y)
@@ -251,6 +269,9 @@ $(call force,CFG_NXP_SNVS,n)
 $(call force,CFG_IMX_OCOTP,n)
 $(call force,CFG_TZC380,n)
 $(call force,CFG_NXP_CAAM,n)
+CFG_IMX_MU ?= y
+CFG_IMX_ELE ?= y
+CFG_IN_TREE_EARLY_TAS += trusted_keys/f04a0fe7-1f5d-4b9b-abf7-619b85b4ce8c
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx91-flavorlist)))
 $(call force,CFG_MX91,y)
 $(call force,CFG_ARM64_core,y)
@@ -262,7 +283,7 @@ $(call force,CFG_IMX_OCOTP,n)
 $(call force,CFG_TZC380,n)
 $(call force,CFG_NXP_CAAM,n)
 CFG_IMX_MU ?= y
-CFG_IMX_ELE ?= n
+CFG_IMX_ELE ?= y
 else
 $(error Unsupported PLATFORM_FLAVOR "$(PLATFORM_FLAVOR)")
 endif
@@ -431,6 +452,24 @@ CFG_NSEC_DDR_1_SIZE  ?= 0x380000000UL
 CFG_CORE_ARM64_PA_BITS ?= 40
 endif
 
+ifneq (,$(filter $(PLATFORM_FLAVOR),mx8qmmekcockpita53))
+CFG_DRAM_BASE := 0x80000000
+CFG_DDR_SIZE ?= 0x40000000
+CFG_UART_BASE ?= UART0_BASE
+CFG_TEE_CORE_NB_CORE = 4
+$(call force,CFG_NXP_CAAM,n)
+CFG_COCKPIT ?= y
+endif
+
+ifneq (,$(filter $(PLATFORM_FLAVOR),mx8qmmekcockpita72))
+CFG_DRAM_BASE := 0xC0000000
+CFG_DDR_SIZE ?= 0x40000000
+CFG_UART_BASE ?= UART2_BASE
+CFG_SC_IPC_BASE = SC_IPC3_BASE
+$(call force,CFG_NXP_CAAM,n)
+CFG_COCKPIT ?= y
+endif
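+# The two cockpit flavors above build one OP-TEE instance per CPU cluster
+# of the i.MX8QM MEK: the A53 instance uses DRAM at 0x80000000 and UART0,
+# the A72 instance uses DRAM at 0xC0000000, UART2 and the SC_IPC3 channel,
+# so that two independent software stacks can run side by side.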
+
 ifneq (,$(filter $(PLATFORM_FLAVOR),mx8dxmek))
 CFG_DDR_SIZE ?= 0x40000000
 CFG_UART_BASE ?= UART0_BASE
@@ -520,8 +559,11 @@ endif
 ifeq ($(filter y, $(CFG_PSCI_ARM32)), y)
 CFG_HWSUPP_MEM_PERM_WXN = n
 CFG_IMX_WDOG ?= y
+CFG_PM_ARM32 ?= y
 endif
 
+CFG_IMX_PM ?= y
+
 ifeq ($(CFG_ARM64_core),y)
 # arm-v8 platforms
 include core/arch/arm/cpu/cortex-armv8-0.mk
@@ -531,13 +573,28 @@ $(call force,CFG_WITH_ARM_TRUSTED_FW,y)
 $(call force,CFG_SECURE_TIME_SOURCE_CNTPCT,y)
 
 CFG_CRYPTO_WITH_CE ?= y
+CFG_DT ?= y
+CFG_EXTERNAL_DTB_OVERLAY = y
 
 supported-ta-targets = ta_arm64
 endif
 
+ifneq (,$(filter y, $(CFG_MX8MN) $(CFG_MX8MP) $(CFG_MX8DX) $(CFG_MX8DXL) $(CFG_MX93) $(CFG_MX91)))
+CFG_TZDRAM_START ?= ($(CFG_DRAM_BASE) + 0x16000000)
+else ifneq (,$(filter y, $(CFG_MX8ULP)))
+CFG_TZDRAM_START ?= ($(CFG_DRAM_BASE) + 0x26000000)
+else ifneq (,$(filter y, $(CFG_MX95)))
+CFG_TZDRAM_START ?= ($(CFG_DRAM_BASE) + 0x0C000000)
+# On i.MX95 we will have 32MB OP-TEE memory and 2MB Shared Memory after that.
+CFG_TZDRAM_SIZE ?= 0x02000000
+else ifneq (,$(filter y, $(CFG_MX8MM) $(CFG_MX8MQ) $(CFG_MX8QM) $(CFG_MX8QX)))
+CFG_TZDRAM_START ?= ($(CFG_DRAM_BASE) - 0x02000000 + $(CFG_DDR_SIZE))
+else
+CFG_TZDRAM_START ?= ($(CFG_DRAM_BASE) + 0x04000000)
+endif
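+# Example with hypothetical values: for CFG_MX8MM with CFG_DRAM_BASE set
+# to 0x80000000 and CFG_DDR_SIZE set to 0x40000000, the branch above gives
+# CFG_TZDRAM_START = 0x80000000 - 0x02000000 + 0x40000000 = 0xBE000000,
+# i.e. TZDRAM sits at the end of DDR and CFG_SHMEM_START follows it.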
+
 CFG_TZDRAM_SIZE ?= 0x01e00000
 CFG_SHMEM_SIZE ?= 0x00200000
-CFG_TZDRAM_START ?= ($(CFG_DRAM_BASE) - $(CFG_TZDRAM_SIZE) - $(CFG_SHMEM_SIZE) + $(CFG_DDR_SIZE))
 CFG_SHMEM_START ?= ($(CFG_TZDRAM_START) + $(CFG_TZDRAM_SIZE))
 
 # Enable embedded tests by default
@@ -558,12 +615,14 @@ CFG_IMX_OCOTP ?= y
 CFG_IMX_DIGPROG ?= y
 CFG_PKCS11_TA ?= y
 CFG_CORE_HUK_SUBKEY_COMPAT_USE_OTP_DIE_ID ?= y
+CFG_TZC380 ?= y
 
 # Almost all platforms include CAAM HW Modules, except the
 # ones forced to be disabled
-CFG_NXP_CAAM ?= n
+CFG_NXP_CAAM ?= y
 
 ifeq ($(CFG_NXP_CAAM),y)
+CFG_CRYPTO_DRIVER ?= y
 ifeq ($(filter y, $(CFG_MX8QM) $(CFG_MX8QX) $(CFG_MX8DXL)), y)
 CFG_IMX_SC ?= y
 CFG_IMX_MU ?= y
@@ -576,3 +635,7 @@ CFG_IMX_CAAM ?= y
 endif
 
 endif
+
+ifeq ($(CFG_BUSFREQ),y)
+$(call force,CFG_SM_PLATFORM_HANDLER,y)
+endif
diff --git a/core/arch/arm/plat-imx/imx_pl310.c b/core/arch/arm/plat-imx/imx_pl310.c
index ddf355e4c..0bd163086 100644
--- a/core/arch/arm/plat-imx/imx_pl310.c
+++ b/core/arch/arm/plat-imx/imx_pl310.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
  * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
  *
  * Peng Fan <peng.fan@nxp.com>
  */
@@ -15,7 +16,7 @@
 #include <sm/optee_smc.h>
 #include <platform_config.h>
 #include <stdint.h>
-#include "imx_pl310.h"
+#include <imx_pl310.h>
 
 #define PL310_AUX_CTRL_FLZW			BIT(0)
 #define PL310_DEBUG_CTRL_DISABLE_WRITEBACK	BIT(1)
@@ -64,6 +65,11 @@ void arm_cl2_config(vaddr_t pl310_base)
 	arm_cl2_invbyway(pl310_base);
 }
 
+bool pl310_enabled(vaddr_t pl310_base)
+{
+	return io_read32(pl310_base + PL310_CTRL) & 1;
+}
+
 void arm_cl2_enable(vaddr_t pl310_base)
 {
 	uint32_t val __maybe_unused;
diff --git a/core/arch/arm/plat-imx/imx_pl310.h b/core/arch/arm/plat-imx/imx_pl310.h
index 4fef57b2d..fe9f00dd2 100644
--- a/core/arch/arm/plat-imx/imx_pl310.h
+++ b/core/arch/arm/plat-imx/imx_pl310.h
@@ -7,6 +7,7 @@
 
 uint32_t pl310_enable(void);
 uint32_t pl310_disable(void);
+bool pl310_enabled(vaddr_t pl310_base);
 uint32_t pl310_enable_writeback(void);
 uint32_t pl310_disable_writeback(void);
 uint32_t pl310_enable_wflz(void);
diff --git a/core/arch/arm/plat-imx/main.c b/core/arch/arm/plat-imx/main.c
index 07b01f765..6e4d4cfaf 100644
--- a/core/arch/arm/plat-imx/main.c
+++ b/core/arch/arm/plat-imx/main.c
@@ -31,6 +31,9 @@
 #include <arm.h>
 #include <console.h>
 #include <drivers/gic.h>
+#ifdef CFG_IMX_TRUSTED_ARM_CE
+#include <drivers/imx_trusted_arm_ce.h>
+#endif
 #include <drivers/imx_uart.h>
 #include <imx.h>
 #include <kernel/boot.h>
@@ -38,6 +41,7 @@
 #include <mm/core_mmu.h>
 #include <platform_config.h>
 #include <stdint.h>
+#include <tee/entry_fast.h>
 
 static struct imx_uart_data console_data __nex_bss;
 
@@ -120,3 +124,27 @@ void boot_secondary_init_intc(void)
 	gic_init_per_cpu();
 }
 #endif
+
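+/*
+ * The IMX_SMC_* function IDs below are expected to arrive as fast SMC
+ * calls, i.e. they are served directly here without allocating a trusted
+ * thread. A normal world caller would issue them like any other OP-TEE
+ * fast SMC (for example via arm_smccc_smc() on Linux); any other function
+ * ID falls through to the generic __tee_entry_fast() handler.
+ */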
+/* Overriding the default __weak tee_entry_fast() */
+void tee_entry_fast(struct thread_smc_args *args)
+{
+	switch (args->a0) {
+#ifdef CFG_IMX_TRUSTED_ARM_CE
+	case IMX_SMC_ENCRYPT_CBC:
+		imx_smc_cipher_cbc(args, true);
+		break;
+	case IMX_SMC_DECRYPT_CBC:
+		imx_smc_cipher_cbc(args, false);
+		break;
+	case IMX_SMC_ENCRYPT_XTS:
+		imx_smc_cipher_xts(args, true);
+		break;
+	case IMX_SMC_DECRYPT_XTS:
+		imx_smc_cipher_xts(args, false);
+		break;
+#endif
+	default:
+		__tee_entry_fast(args);
+		break;
+	}
+}
diff --git a/core/arch/arm/plat-imx/platform_config.h b/core/arch/arm/plat-imx/platform_config.h
index d7ebe0d20..6af45bbdb 100644
--- a/core/arch/arm/plat-imx/platform_config.h
+++ b/core/arch/arm/plat-imx/platform_config.h
@@ -3,7 +3,7 @@
  * Copyright (C) 2015 Freescale Semiconductor, Inc.
  * Copyright (c) 2016, Wind River Systems.
  * All rights reserved.
- * Copyright 2019 NXP
+ * Copyright 2019, 2021 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -42,6 +42,8 @@
 #define STACK_ALIGNMENT			64
 #define CONSOLE_UART_BASE		(CFG_UART_BASE)
 
+#define SC_IPC_BASE_SECURE (CFG_SC_IPC_BASE)
+
 /* For i.MX6 Quad SABRE Lite and Smart Device board */
 #if defined(CFG_MX6QP) || defined(CFG_MX6Q) || defined(CFG_MX6D) || \
 	defined(CFG_MX6DL) || defined(CFG_MX6S)
diff --git a/core/arch/arm/plat-imx/registers/imx6-iomux.h b/core/arch/arm/plat-imx/registers/imx6-iomux.h
new file mode 100644
index 000000000..17ea1fa7f
--- /dev/null
+++ b/core/arch/arm/plat-imx/registers/imx6-iomux.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#ifndef __MX6_IOMUX_REGS_H__
+#define __MX6_IOMUX_REGS_H__
+
+/*
+ * Macros definition
+ */
+#define IOMUX_GPRx_OFFSET(idx)	((idx) * 4)
+
+/*
+ * TrustZone Address Space Controller Select
+ */
+#define IOMUX_GPR_TZASC_ID		9
+#define BP_IOMUX_GPR_TZASC2_MUX_CTRL	1
+#define BM_IOMUX_GPR_TZASC2_MUX_CTRL	BIT32(BP_IOMUX_GPR_TZASC2_MUX_CTRL)
+#define BP_IOMUX_GPR_TZASC1_MUX_CTRL	0
+#define BM_IOMUX_GPR_TZASC1_MUX_CTRL	BIT32(BP_IOMUX_GPR_TZASC1_MUX_CTRL)
+
+/*
+ * OCRAM Configuration
+ */
+#define IOMUX_GPR_OCRAM_ID		10
+
+#define BP_IOMUX_GPR_OCRAM_TZ_ADDR	5
+#define BM_IOMUX_GPR_OCRAM_TZ_ADDR	\
+				SHIFT_U32(0x3F, BP_IOMUX_GPR_OCRAM_TZ_ADDR)
+#define BP_IOMUX_GPR_OCRAM_TZ_EN	4
+#define BM_IOMUX_GPR_OCRAM_TZ_EN	BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN)
+#define IOMUX_GPR_OCRAM_TZ_ENABLE	BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN)
+#define IOMUX_GPR_OCRAM_TZ_DISABLE	SHIFT_U32(0, BP_IOMUX_GPR_OCRAM_TZ_EN)
+
+/* For MX6DL */
+#define BP_IOMUX_GPR_OCRAM_TZ_ADDR_6DL	5
+#define BM_IOMUX_GPR_OCRAM_TZ_ADDR_6DL	\
+			SHIFT_U32(0x1F, BP_IOMUX_GPR_OCRAM_TZ_ADDR_6DL)
+#define BP_IOMUX_GPR_OCRAM_TZ_EN_6DL	4
+#define BM_IOMUX_GPR_OCRAM_TZ_EN_6DL	\
+			BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN_6DL)
+#define IOMUX_GPR_OCRAM_TZ_ENABLE_6DL	\
+			BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN_6DL)
+#define IOMUX_GPR_OCRAM_TZ_DISABLE_6DL	\
+			SHIFT_U32(0, BP_IOMUX_GPR_OCRAM_TZ_EN_6DL)
+
+/* For MX6SX */
+#define BP_IOMUX_GPR_OCRAM_TZ_ADDR_6SX	11
+#define BM_IOMUX_GPR_OCRAM_TZ_ADDR_6SX	\
+			SHIFT_U32(0x3F, BP_IOMUX_GPR_OCRAM_TZ_ADDR_6SX)
+#define BP_IOMUX_GPR_OCRAM_TZ_EN_6SX	10
+#define BM_IOMUX_GPR_OCRAM_TZ_EN_6SX	\
+			BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN_6SX)
+#define IOMUX_GPR_OCRAM_TZ_ENABLE_6SX	\
+			BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN_6SX)
+#define IOMUX_GPR_OCRAM_TZ_DISABLE_6SX	\
+			SHIFT_U32(0, BP_IOMUX_GPR_OCRAM_TZ_EN_6SX)
+
+/* For MX6UL */
+#define BP_IOMUX_GPR_OCRAM_TZ_ADDR_6UL	11
+#define BM_IOMUX_GPR_OCRAM_TZ_ADDR_6UL	\
+			SHIFT_U32(0x1F, BP_IOMUX_GPR_OCRAM_TZ_ADDR_6UL)
+#define BP_IOMUX_GPR_OCRAM_TZ_EN_6UL	10
+#define BM_IOMUX_GPR_OCRAM_TZ_EN_6UL	\
+			BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN_6UL)
+#define IOMUX_GPR_OCRAM_TZ_ENABLE_6UL	\
+			BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN_6UL)
+#define IOMUX_GPR_OCRAM_TZ_DISABLE_6UL	\
+			SHIFT_U32(0, BP_IOMUX_GPR_OCRAM_TZ_EN_6UL)
+
+/*
+ * The configuration is locked: register bits 16 to 29 mirror bits 0 to 13.
+ */
+#define BP_IOMUX_GPR_OCRAM_LOCK		16
+#define IOMUX_GPR_OCRAM_LOCK(value)	\
+			SHIFT_U32(value, BP_IOMUX_GPR_OCRAM_LOCK)
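+/*
+ * Hypothetical usage sketch: the lock mirror is written together with the
+ * configuration value itself, e.g.
+ *   io_write32(iomuxc_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID),
+ *		val | IOMUX_GPR_OCRAM_LOCK(val));
+ * where val holds the TZ_EN/TZ_ADDR bits and iomuxc_base is the virtual
+ * address of the IOMUXC GPR block (both names are illustrative only).
+ */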
+
+/*
+ * S_OCRAM Configuration
+ */
+#define IOMUX_GPR_S_OCRAM_ID	11
+
+/* For MX6SX */
+#define BP_IOMUX_GPR_S_OCRAM_TZ_ADDR_6SX	11
+#define BM_IOMUX_GPR_S_OCRAM_TZ_ADDR_6SX	\
+			SHIFT_U32(0x3, BP_IOMUX_GPR_S_OCRAM_TZ_ADDR_6SX)
+#define BP_IOMUX_GPR_S_OCRAM_TZ_EN_6SX		10
+#define BM_IOMUX_GPR_S_OCRAM_TZ_EN_6SX		\
+			BIT32(BP_IOMUX_GPR_S_OCRAM_TZ_EN_6SX)
+#define IOMUX_GPR_S_OCRAM_TZ_ENABLE_6SX		\
+			BIT32(BP_IOMUX_GPR_S_OCRAM_TZ_EN_6SX)
+#define IOMUX_GPR_S_OCRAM_TZ_DISABLE_6SX	\
+			SHIFT_U32(0, BP_IOMUX_GPR_S_OCRAM_TZ_EN_6SX)
+#define BP_IOMUX_GPR_S_OCRAM_L2_EN_6SX		1
+#define BM_IOMUX_GPR_S_OCRAM_L2_EN_6SX		\
+			BIT32(BP_IOMUX_GPR_S_OCRAM_L2_EN_6SX)
+
+#endif /* __MX6_IOMUX_REGS_H__ */
diff --git a/core/arch/arm/plat-imx/registers/imx6-mmdc.h b/core/arch/arm/plat-imx/registers/imx6-mmdc.h
new file mode 100644
index 000000000..1fa875dd2
--- /dev/null
+++ b/core/arch/arm/plat-imx/registers/imx6-mmdc.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+#ifndef __IMX6_MMDC_REGS__
+#define __IMX6_MMDC_REGS__
+
+#if defined(CFG_MX6) || defined(CFG_MX6UL)
+#define MX6_MMDC1_OFFSET	0x0000
+#define MX6_MMDC2_OFFSET	0x4000
+
+#define MX6_MMDC_MDCTL		0x000
+#define MX6_MMDC_MDPDC		0x004
+#define MX6_MMDC_MDCFG0		0x00C
+#define MX6_MMDC_MDCFG1		0x010
+#define MX6_MMDC_MDCFG2		0x014
+#define MX6_MMDC_MDMISC		0x018
+#define MX6_MMDC_MDSCR		0x01C
+#define MX6_MMDC_MDCFG3LP	0x038
+#define MX6_MMDC_MAARCR		0x400
+#define MX6_MMDC_MAPSR		0x404
+#define MX6_MMDC_MADPCR0	0x410
+#define MX6_MMDC_MPZQHWCTRL	0x800
+#define MX6_MMDC_MPODTCTRL	0x818
+#define MX6_MMDC_MPDGCTRL0	0x83c
+#define MX6_MMDC_MPDGCTRL1	0x840
+#define MX6_MMDC_MPRDDLCTL	0x848
+#define MX6_MMDC_MPWRDLCTL	0x850
+#define MX6_MMDC_MPMUR0		0x8B8
+
+#define BM_MMDC_MDMISC_DDR_TYPE			0x18
+#define BP_MMDC_MDMISC_DDR_TYPE_SHIFT		3
+#define IMX_DDR_TYPE_DDR3			0
+#define IMX_DDR_TYPE_LPDDR2			1
+#define IMX_DDR_TYPE_LPDDR3			2
+
+#define BP_MX6_MMDC_MPMUR0_MU_UNIT_DEL_NUM	16
+/* For iMX6SLL */
+#define IMX_MMDC_DDR_TYPE_LPDDR3		3
+
+#endif
+
+#endif /* __IMX6_MMDC_REGS__ */
diff --git a/core/arch/arm/plat-imx/registers/imx6.h b/core/arch/arm/plat-imx/registers/imx6.h
index 3889989c9..e8b613420 100644
--- a/core/arch/arm/plat-imx/registers/imx6.h
+++ b/core/arch/arm/plat-imx/registers/imx6.h
@@ -32,6 +32,8 @@
 
 #include <registers/imx6-crm.h>
 #include <registers/imx6-dcp.h>
+#include <registers/imx6-iomux.h>
+#include <registers/imx6-mmdc.h>
 
 #define UART1_BASE			0x2020000
 #define IOMUXC_BASE			0x020E0000
@@ -77,11 +79,20 @@
 
 #define SCU_BASE			0x00A00000
 #define PL310_BASE			0x00A02000
+#define PL310_SIZE			0x1000
+#define SRC_BASE			0x020D8000
 #define IRAM_BASE			0x00900000
 
+#define SECMEM_BASE			0x00100000
+#define SECMEM_SIZE			0x40000
+
 #define OCOTP_BASE			0x021BC000
 #define OCOTP_SIZE			0x4000
 
+/* Secure IRAM on i.MX6SX is 16KB */
+#define IRAM_6SX_S_BASE		0x008f8000
+#define IRAM_6SX_S_SIZE		(16 * 1024)
+
 #define GIC_BASE			0x00A00000
 #define GICD_OFFSET			0x1000
 
@@ -120,30 +131,6 @@
 #define MX6Q_CCM_CCR			0x0
 #define MX6Q_ANATOP_CORE		0x140
 
-#define IOMUXC_GPR9_OFFSET		0x24
-#define IOMUXC_GPR10_OFFSET		0x28
-
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_OFFSET	5
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_MASK		GENMASK_32(10, 5)
-
-#define IOMUXC_GPR10_OCRAM_TZ_EN_OFFSET		4
-#define IOMUXC_GPR10_OCRAM_TZ_EN_MASK		GENMASK_32(4, 4)
-
-#define IOMUXC_GPR10_OCRAM_TZ_EN_LOCK_OFFSET	20
-#define IOMUXC_GPR10_OCRAM_TZ_EN_LOCK_MASK	GENMASK_32(20, 20)
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_LOCK_OFFSET	21
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_LOCK_MASK	GENMASK_32(26, 21)
-
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_OFFSET_6UL	11
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_MASK_6UL	GENMASK_32(15, 11)
-#define IOMUXC_GPR10_OCRAM_TZ_EN_OFFSET_6UL	10
-#define IOMUXC_GPR10_OCRAM_TZ_EN_MASK_6UL	GENMASK_32(10, 10)
-
-#define IOMUXC_GPR10_OCRAM_TZ_EN_LOCK_OFFSET_6UL	26
-#define IOMUXC_GPR10_OCRAM_TZ_EN_LOCK_MASK_6UL		GENMASK_32(26, 26)
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_LOCK_OFFSET_6UL	(27)
-#define IOMUXC_GPR10_OCRAM_TZ_ADDR_LOCK_MASK_6UL	GENMASK_32(31, 27)
-
 #ifdef CFG_MX6SL
 #define DIGPROG_OFFSET	0x280
 #else
diff --git a/core/arch/arm/plat-imx/registers/imx7-ddrc.h b/core/arch/arm/plat-imx/registers/imx7-ddrc.h
new file mode 100644
index 000000000..665475776
--- /dev/null
+++ b/core/arch/arm/plat-imx/registers/imx7-ddrc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2019 NXP
+ */
+#ifndef __IMX7_DDRC_REGS__
+#define __IMX7_DDRC_REGS__
+
+#define IMX_DDR_TYPE_DDR3		BIT32(0)
+#define IMX_DDR_TYPE_LPDDR2		BIT32(2)
+#define IMX_DDR_TYPE_LPDDR3		BIT32(3)
+
+/* DDR Controller */
+#define MX7_DDRC_MSTR			0x000
+#define MX7_DDRC_STAT			0x004
+#define MX7_DDRC_MRCTRL0		0x010
+#define MX7_DDRC_MRCTRL1		0x014
+#define MX7_DDRC_MRSTAT			0x018
+#define MX7_DDRC_PWRCTL			0x030
+#define MX7_DDRC_RFSHCTL3		0x060
+#define MX7_DDRC_RFSHTMG		0x064
+#define MX7_DDRC_ZQCTL0			0x180
+#define MX7_DDRC_DFIMISC		0x1B0
+#define MX7_DDRC_DBG1			0x304
+#define MX7_DDRC_DBGCAM			0x308
+#define MX7_DDRC_SWCTL			0x320
+#define MX7_DDRC_SWSTAT			0x324
+
+/* DDR Multi Port Controller */
+#define MX7_DDRC_MP_PSTAT		0x3FC
+#define MX7_DDRC_MP_PCTRL0		0x490
+
+/* DDR PHY */
+#define MX7_DDRPHY_PHY_CON1			0x04
+#define MX7_DDRPHY_LP_CON0			0x18
+#define MX7_DDRPHY_OFFSETD_CON0		0x50
+#define MX7_DDRPHY_OFFSETR_CON0		0x20
+#define MX7_DDRPHY_OFFSETR_CON1		0x24
+#define MX7_DDRPHY_OFFSETR_CON2		0x28
+#define MX7_DDRPHY_OFFSETW_CON0		0x30
+#define MX7_DDRPHY_OFFSETW_CON1		0x34
+#define MX7_DDRPHY_OFFSETW_CON2		0x38
+#define MX7_DDRPHY_RFSHTMG			0x64
+#define MX7_DDRPHY_CA_WLDSKEW_CON0	0x6C
+#define MX7_DDRPHY_CA_DSKEW_CON0	0x7C
+#define MX7_DDRPHY_CA_DSKEW_CON1	0x80
+#define MX7_DDRPHY_CA_DSKEW_CON2	0x84
+#define MX7_DDRPHY_MDLL_CON0		0xB0
+#define MX7_DDRPHY_MDLL_CON1		0xB4
+
+#endif /* __IMX7_DDRC_REGS__ */
diff --git a/core/arch/arm/plat-imx/registers/imx7-iomux.h b/core/arch/arm/plat-imx/registers/imx7-iomux.h
new file mode 100644
index 000000000..92c4c3697
--- /dev/null
+++ b/core/arch/arm/plat-imx/registers/imx7-iomux.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#ifndef __MX7_IOMUX_H__
+#define __MX7_IOMUX_H__
+
+/*
+ * Macros definition
+ */
+#define IOMUX_GPRx_OFFSET(idx)	((idx) * 4)
+
+/*
+ * DDR PHY control PD pins
+ * TrustZone Address Space Controller Select
+ */
+#define IOMUX_GPR_TZASC_ID		9
+#define BP_IOMUX_GPR_DDR_PHY_CTRL_PD	1
+#define BM_IOMUX_GPR_DDR_PHY_CTRL_PD	(0x1F << BP_IOMUX_GPR_DDR_PHY_CTRL_PD)
+#define BP_IOMUX_GPR_TZASC1_MUX_CTRL	0
+#define BM_IOMUX_GPR_TZASC1_MUX_CTRL	BIT32(BP_IOMUX_GPR_TZASC1_MUX_CTRL)
+
+/*
+ * OCRAM Configuration
+ */
+#define IOMUX_GPR_OCRAM_ID		11
+
+/* State Retention configuration */
+#define BP_IOMUX_GPR_OCRAM_S_TZ_ADDR	11
+#define BM_IOMUX_GPR_OCRAM_S_TZ_ADDR	(0x7 << BP_IOMUX_GPR_OCRAM_S_TZ_ADDR)
+#define BP_IOMUX_GPR_OCRAM_S_TZ_EN	10
+#define BM_IOMUX_GPR_OCRAM_S_TZ_EN	BIT32(BP_IOMUX_GPR_OCRAM_S_TZ_EN)
+#define IOMUX_GPR_OCRAM_S_TZ_ENABLE	BIT32(BP_IOMUX_GPR_OCRAM_S_TZ_EN)
+
+/* Running configuration */
+#define BP_IOMUX_GPR_OCRAM_TZ_ADDR	1
+#define BM_IOMUX_GPR_OCRAM_TZ_ADDR	(0x1F << BP_IOMUX_GPR_OCRAM_TZ_ADDR)
+#define BP_IOMUX_GPR_OCRAM_TZ_EN	0
+#define BM_IOMUX_GPR_OCRAM_TZ_EN	BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN)
+#define IOMUX_GPR_OCRAM_TZ_ENABLE	BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN)
+#define IOMUX_GPR_OCRAM_TZ_DISABLE	(0 << BP_IOMUX_GPR_OCRAM_TZ_EN)
+
+/*
+ * The configuration is locked: register bits 16 to 29 mirror bits 0 to 13.
+ */
+#define BP_IOMUX_GPR_OCRAM_LOCK		16
+#define IOMUX_GPR_OCRAM_LOCK(value)	((value) << BP_IOMUX_GPR_OCRAM_LOCK)
+
+#endif /* __MX7_IOMUX_H__ */
diff --git a/core/arch/arm/plat-imx/registers/imx7.h b/core/arch/arm/plat-imx/registers/imx7.h
index 6c3baf400..cefd3e950 100644
--- a/core/arch/arm/plat-imx/registers/imx7.h
+++ b/core/arch/arm/plat-imx/registers/imx7.h
@@ -6,6 +6,8 @@
 #define __IMX7_H__
 
 #include <registers/imx7-crm.h>
+#include <registers/imx7-ddrc.h>
+#include <registers/imx7-iomux.h>
 
 #define GIC_BASE		0x31000000
 #define GIC_SIZE		0x8000
@@ -47,7 +49,9 @@
 #define TZASC_BASE		0x30780000
 #define TZASC_SIZE		0x10000
 #define DDRC_PHY_BASE		0x30790000
+#define DDRC_PHY_SIZE		0x10000
 #define MMDC_P0_BASE		0x307A0000
+#define MMDC_P0_SIZE		0x10000
 #define DDRC_BASE		0x307A0000
 #define IRAM_BASE		0x00900000
 #define IRAM_S_BASE		0x00180000
@@ -58,8 +62,6 @@
 #define CSU_SETTING_LOCK	0x01000100
 #define CSU_SA			0x218
 
-#define TRUSTZONE_OCRAM_START	0x180000
-
 #define IOMUXC_GPR9_OFFSET				0x24
 #define IOMUXC_GPR9_TZASC1_MUX_CONTROL_OFFSET		0
 
diff --git a/core/arch/arm/plat-imx/registers/imx7ulp.h b/core/arch/arm/plat-imx/registers/imx7ulp.h
index a5abc506a..6988f978a 100644
--- a/core/arch/arm/plat-imx/registers/imx7ulp.h
+++ b/core/arch/arm/plat-imx/registers/imx7ulp.h
@@ -31,20 +31,31 @@
 #define TPM5_BASE		0x40260000
 #define WDOG_BASE		0x403d0000
 #define WDOG_SIZE		0x10
+#define TPM5_SIZE		0x10000
 #define SCG1_BASE		0x403e0000
+#define SCG1_SIZE		0x10000
 #define PCC2_BASE		0x403f0000
+#define PCC2_SIZE		0x10000
 #define PMC1_BASE		0x40400000
+#define PMC1_SIZE		0x1000
 #define SMC1_BASE		0x40410000
+#define SMC1_SIZE		0x1000
 #define MMDC_BASE		0x40ab0000
+#define MMDC_SIZE		0x1000
 #define IOMUXC1_BASE		0x40ac0000
+#define IOMUXC1_SIZE		0x1000
 #define MMDC_IO_BASE		0x40ad0000
+#define MMDC_IO_SIZE		0x1000
+#define PCR_BASE		0x40ae0000
+#define PCR_SIZE		0x10000
 #define PCC3_BASE		0x40b30000
+#define PCC3_SIZE		0x1000
 #define OCOTP_BASE		0x410A6000
 #define OCOTP_SIZE		0x4000
 #define PMC0_BASE		0x410a1000
+#define PMC0_SIZE		0x1000
 #define SIM_BASE		0x410a3000
-#define OCOTP_BASE		0x410A6000
-#define OCOTP_SIZE		0x4000
+#define SIM_SIZE		0x1000
 
 #define CAAM_BASE		0x40240000
 #define CAAM_SIZE		0x10000
diff --git a/core/arch/arm/plat-imx/registers/imx8q.h b/core/arch/arm/plat-imx/registers/imx8q.h
index b085e62a1..92eed87c3 100644
--- a/core/arch/arm/plat-imx/registers/imx8q.h
+++ b/core/arch/arm/plat-imx/registers/imx8q.h
@@ -15,7 +15,8 @@
 #define UART4_BASE	0x5a0a0000
 #define CAAM_BASE	0x31400000
 #define CAAM_SIZE       0x40000
-#define SC_IPC_BASE_SECURE 0x5d1b0000
+#define SC_IPC0_BASE	0x5d1b0000
+#define SC_IPC3_BASE	0x5d1e0000
 #define SC_IPC_SIZE	   0x10000
 
 #endif /* __IMX8Q_H__ */
diff --git a/core/arch/arm/plat-imx/registers/imx93.h b/core/arch/arm/plat-imx/registers/imx93.h
index e91607537..21080da82 100644
--- a/core/arch/arm/plat-imx/registers/imx93.h
+++ b/core/arch/arm/plat-imx/registers/imx93.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-2-Clause */
 /*
- * Copyright 2022 NXP
+ * Copyright 2022-2023 NXP
  */
 #ifndef __IMX93_H__
 #define __IMX93_H__
@@ -9,7 +9,13 @@
 #define GICR_BASE 0x48040000
 
 #define UART1_BASE 0x44380000
-#define MU_BASE	   0x47520000
+/*
+ * For the normal MU, use MU_BASE 0x47520000.
+ * For the trusted MU, use MU_BASE 0x47530000.
+ */
+#define MU_BASE 0x47530000
 #define MU_SIZE	   0x10000
 
+#define MU_TRUST_BASE 0x47530000
+
 #endif /* __IMX93_H__ */
diff --git a/core/arch/arm/plat-imx/registers/imx95.h b/core/arch/arm/plat-imx/registers/imx95.h
index 30072e7fe..b0382e682 100644
--- a/core/arch/arm/plat-imx/registers/imx95.h
+++ b/core/arch/arm/plat-imx/registers/imx95.h
@@ -10,4 +10,7 @@
 
 #define UART1_BASE 0x44380000
 
+#define MU_BASE 0x47530000
+#define MU_SIZE 0x10000
+
 #endif /* __IMX95_H__ */
diff --git a/core/arch/arm/plat-imx/sm_platform_handler.c b/core/arch/arm/plat-imx/sm_platform_handler.c
index 0828891cc..1421d1e73 100644
--- a/core/arch/arm/plat-imx/sm_platform_handler.c
+++ b/core/arch/arm/plat-imx/sm_platform_handler.c
@@ -7,7 +7,8 @@
 #include <sm/optee_smc.h>
 #include <sm/sm.h>
 #include <trace.h>
-#include "imx_pl310.h"
+#include <imx_pl310.h>
+#include <drivers/pm/imx/busfreq.h>
 
 #define IMX_SIP_PL310_ENABLE			1
 #define IMX_SIP_PL310_DISABLE			2
@@ -15,6 +16,8 @@
 #define IMX_SIP_PL310_DISABLE_WRITEBACK		4
 #define IMX_SIP_PL310_ENABLE_WFLZ		5
 
+#define IMX_SIP_BUSFREQ_CHANGE			6
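+/*
+ * For IMX_SIP_BUSFREQ_CHANGE, a1/a2 of the SMC are forwarded as-is to
+ * busfreq_change(); their meaning is defined by the busfreq driver
+ * (see drivers/pm/imx/busfreq.h).
+ */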
+
 static enum sm_handler_ret imx_sip_handler(struct thread_smc_args *smc_args)
 {
 	uint16_t sip_func = OPTEE_SMC_FUNC_NUM(smc_args->a0);
@@ -36,6 +39,11 @@ static enum sm_handler_ret imx_sip_handler(struct thread_smc_args *smc_args)
 	case IMX_SIP_PL310_ENABLE_WFLZ:
 		smc_args->a0 = pl310_enable_wflz();
 		break;
+#endif
+#ifdef CFG_BUSFREQ
+	case IMX_SIP_BUSFREQ_CHANGE:
+		smc_args->a0 = busfreq_change(smc_args->a1, smc_args->a2);
+		break;
 #endif
 	default:
 		EMSG("Invalid SIP function code: 0x%x", sip_func);
diff --git a/core/arch/arm/plat-imx/sub.mk b/core/arch/arm/plat-imx/sub.mk
index 0849d031b..f7dac7e61 100644
--- a/core/arch/arm/plat-imx/sub.mk
+++ b/core/arch/arm/plat-imx/sub.mk
@@ -2,15 +2,10 @@ global-incdirs-y += .
 srcs-y += main.c imx-common.c
 
 srcs-$(CFG_PL310) += imx_pl310.c
-ifeq ($(CFG_PSCI_ARM32),y)
-$(call force,CFG_PM_ARM32,y)
-CFG_IMX_PM ?= y
-endif
 
 ifneq (,$(filter y, $(CFG_MX6Q) $(CFG_MX6QP) $(CFG_MX6D) $(CFG_MX6DL) \
 	$(CFG_MX6S) $(CFG_MX6SL) $(CFG_MX6SLL) $(CFG_MX6SX)))
 srcs-y += a9_plat_init.S
-srcs-$(CFG_SM_PLATFORM_HANDLER) += sm_platform_handler.c
 endif
 
 ifneq (,$(filter y, $(CFG_MX7) $(CFG_MX7ULP) $(CFG_MX6UL) $(CFG_MX6ULL)))
@@ -18,3 +13,4 @@ srcs-y += a7_plat_init.S
 endif
 
 srcs-$(CFG_TZC380) += tzc380.c
+srcs-$(CFG_SM_PLATFORM_HANDLER) += sm_platform_handler.c
diff --git a/core/arch/arm/plat-ls/conf.mk b/core/arch/arm/plat-ls/conf.mk
index ae77a277c..84287c382 100644
--- a/core/arch/arm/plat-ls/conf.mk
+++ b/core/arch/arm/plat-ls/conf.mk
@@ -20,6 +20,8 @@ $(call force,CFG_DRAM0_SIZE,0x40000000)
 $(call force,CFG_CORE_CLUSTER_SHIFT,2)
 CFG_NUM_THREADS ?= 2
 CFG_SHMEM_SIZE ?= 0x00200000
+CFG_DT ?= y
+CFG_EXTERNAL_DTB_OVERLAY = y
 endif
 
 ifeq ($(PLATFORM_FLAVOR),ls1043ardb)
@@ -28,6 +30,8 @@ $(call force,CFG_TEE_CORE_NB_CORE,4)
 $(call force,CFG_DRAM0_SIZE,0x80000000)
 $(call force,CFG_CORE_CLUSTER_SHIFT,2)
 CFG_SHMEM_SIZE ?= 0x00200000
+CFG_DT ?= y
+CFG_EXTERNAL_DTB_OVERLAY = y
 endif
 
 ifeq ($(PLATFORM_FLAVOR),ls1046ardb)
@@ -36,6 +40,8 @@ $(call force,CFG_TEE_CORE_NB_CORE,4)
 $(call force,CFG_DRAM0_SIZE,0x80000000)
 $(call force,CFG_CORE_CLUSTER_SHIFT,2)
 CFG_SHMEM_SIZE ?= 0x00200000
+CFG_DT ?= y
+CFG_EXTERNAL_DTB_OVERLAY = y
 endif
 
 ifeq ($(PLATFORM_FLAVOR),ls1088ardb)
@@ -45,6 +51,9 @@ $(call force,CFG_DRAM0_SIZE,0x80000000)
 $(call force,CFG_CORE_CLUSTER_SHIFT,2)
 $(call force,CFG_ARM_GICV3,y)
 CFG_SHMEM_SIZE ?= 0x00200000
+$(call force,CFG_NXP_CAAM,n)
+CFG_DT ?= y
+CFG_EXTERNAL_DTB_OVERLAY = y
 endif
 
 ifeq ($(PLATFORM_FLAVOR),ls2088ardb)
@@ -54,6 +63,9 @@ $(call force,CFG_DRAM0_SIZE,0x80000000)
 $(call force,CFG_CORE_CLUSTER_SHIFT,1)
 $(call force,CFG_ARM_GICV3,y)
 CFG_SHMEM_SIZE ?= 0x00200000
+$(call force,CFG_NXP_CAAM,n)
+CFG_DT ?= y
+CFG_EXTERNAL_DTB_OVERLAY = y
 endif
 
 ifeq ($(PLATFORM_FLAVOR),lx2160aqds)
@@ -90,6 +102,7 @@ CFG_LS_I2C ?= y
 CFG_LS_GPIO ?= y
 CFG_LS_DSPI ?= y
 CFG_SHMEM_SIZE ?= 0x00200000
+CFG_PTA_LS_I2C_RTC_TEST ?= y
 endif
 
 ifeq ($(PLATFORM_FLAVOR),ls1028ardb)
@@ -99,6 +112,8 @@ $(call force,CFG_DRAM0_SIZE,0x80000000)
 $(call force,CFG_CORE_CLUSTER_SHIFT,1)
 $(call force,CFG_ARM_GICV3,y)
 CFG_SHMEM_SIZE ?= 0x00200000
+CFG_DT ?= y
+CFG_EXTERNAL_DTB_OVERLAY = y
 endif
 
 ifeq ($(platform-flavor-armv8),1)
@@ -128,4 +143,10 @@ CFG_CRYPTO_SIZE_OPTIMIZATION ?= n
 
 # NXP CAAM support is not enabled by default and can be enabled
 # on the command line
-CFG_NXP_CAAM ?= n
+CFG_NXP_CAAM ?= y
+
+ifeq ($(CFG_NXP_CAAM),y)
+# If the NXP CAAM Driver is supported, the Crypto Driver interfacing
+# it with the generic crypto API can be enabled.
+CFG_CRYPTO_DRIVER ?= y
+endif
diff --git a/core/arch/arm/plat-ls/pta/i2c_rtc_test.c b/core/arch/arm/plat-ls/pta/i2c_rtc_test.c
new file mode 100644
index 000000000..113afdd5b
--- /dev/null
+++ b/core/arch/arm/plat-ls/pta/i2c_rtc_test.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2021 NXP
+ * PTA for I2C Testing on LayerScape Boards
+ */
+
+#include <drivers/ls_i2c.h>
+#include <initcall.h>
+#include <kernel/delay.h>
+#include <kernel/pseudo_ta.h>
+#include <mm/core_memprot.h>
+#include <pta_i2c_rtc_test.h>
+#include <tee_api_types.h>
+
+#define PTA_NAME "i2c_rtc_test.pta"
+
+#define PCF2129_CTRL3_BIT_BLF BIT(2) /* Battery Low Flag */
+#define PCF2129_SLAVE_ADDRESS 0x51
+
+/*
+ * The control registers are a combination of multiple bit fields.
+ * See their description at the following link:
+ * https://www.nxp.com/docs/en/data-sheet/PCF2129.pdf [Sec 8.2]
+ */
+struct pcf2129_regs {
+	uint8_t control[3];
+	uint8_t seconds;
+	uint8_t minutes;
+	uint8_t hours;
+	uint8_t days;
+	uint8_t weekdays;
+	uint8_t months;
+	uint8_t years;
+	uint8_t second_alarm;
+	uint8_t minute_alarm;
+	uint8_t hour_alarm;
+	uint8_t day_alarm;
+	uint8_t weekday_alarm;
+} __packed;
+
+#if defined(PLATFORM_FLAVOR_lx2160ardb)
+/* I2C clock based on the 750 MHz platform clock */
+#define I2C_CLOCK      93750000
+#define I2C_SPEED      100000
+#define I2C_CONTROLLER 4
+#endif
+
+/*
+ * The RTC outputs time in BCD format. Since calculations are done on
+ * the seconds field, it must be converted to decimal first, e.g. BCD
+ * 0x59 becomes 5 * 10 + 9 = 59. For more details see section 8.8 of
+ * https://www.nxp.com/docs/en/data-sheet/PCF2129.pdf
+ */
+static inline int bcd_to_decimal(uint8_t bcd)
+{
+	int dec = ((bcd & 0xF0) >> 4) * 10 + (bcd & 0x0F);
+	return dec;
+}
+
+static TEE_Result i2c_rtc_get_second(vaddr_t base, uint8_t *sec)
+{
+	struct i2c_operation operation = {};
+	unsigned int operation_count = 0;
+	uint8_t rtc_reg_adr = 0;
+	static struct pcf2129_regs pcf_regs = {};
+	uint8_t __maybe_unused days = 0;
+	uint8_t __maybe_unused hours = 0;
+	uint8_t __maybe_unused minutes = 0;
+	uint8_t seconds = 0;
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	rtc_reg_adr = offsetof(struct pcf2129_regs, control[2]);
+
+	operation_count = 1;
+	operation.flags = I2C_FLAG_WRITE;
+	operation.length_in_bytes = sizeof(rtc_reg_adr);
+	operation.buffer = &rtc_reg_adr;
+
+	res = i2c_bus_xfer(base, PCF2129_SLAVE_ADDRESS, &operation,
+			   operation_count);
+	if (res) {
+		EMSG("RTC write error at address, status = 0x%x", res);
+		goto exit;
+	}
+
+	operation_count = 1;
+	operation.flags = I2C_FLAG_READ;
+	operation.length_in_bytes = offsetof(struct pcf2129_regs,
+					     second_alarm) -
+				    offsetof(struct pcf2129_regs, control[2]);
+	operation.buffer = &pcf_regs.control[2];
+
+	res = i2c_bus_xfer(base, PCF2129_SLAVE_ADDRESS, &operation,
+			   operation_count);
+	if (res) {
+		EMSG("RTC read error at address, status = 0x%x", res);
+		goto exit;
+	}
+
+	days = bcd_to_decimal(pcf_regs.days);
+	hours = bcd_to_decimal(pcf_regs.hours);
+	minutes = bcd_to_decimal(pcf_regs.minutes);
+	seconds = bcd_to_decimal(pcf_regs.seconds);
+
+	DMSG("Days = %u, Hours = %u, Minutes = %u, Second = %u", days, hours,
+	     minutes, seconds);
+
+	if (pcf_regs.control[2] & PCF2129_CTRL3_BIT_BLF)
+		EMSG("RTC battery low, check RTC battery");
+
+	*sec = seconds;
+
+exit:
+	return res;
+}
+
+static TEE_Result i2c_test_suite(void)
+{
+	uint8_t curr_sec = 0, prev_sec = 0;
+	uint8_t num_times = 0;
+	struct ls_i2c_data i2c_data = {};
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	DMSG("I2C RTC TEST: read RTC time 5 times, 2 seconds apart");
+
+	/* Set the I2C controller configuration */
+	i2c_data.i2c_controller = I2C_CONTROLLER;
+	i2c_data.i2c_bus_clock = I2C_CLOCK;
+	i2c_data.speed = I2C_SPEED;
+
+	/* Initialise I2C driver */
+	res = i2c_init(&i2c_data);
+	if (res) {
+		EMSG("Unable to init I2C driver");
+		goto exit;
+	}
+
+	while (num_times < 5) {
+		res = i2c_rtc_get_second(i2c_data.base, &curr_sec);
+		if (res)
+			goto exit;
+		/*
+		 * Skip the check on the first iteration, which only
+		 * saves prev_sec for comparison with the next seconds
+		 * value read from the RTC.
+		 * Also skip the check when the minute wraps around.
+		 */
+		if (num_times > 0 && curr_sec > 1) {
+			/*
+			 * The difference is expected to be 2 seconds.
+			 * A difference of 3 seconds is also accepted
+			 * to cover boundary conditions and the delay
+			 * added by the calculations.
+			 */
+			if ((curr_sec - prev_sec) != 2 &&
+			    (curr_sec - prev_sec) != 3) {
+				EMSG("Seconds mismatch: diff = %u sec",
+				     curr_sec - prev_sec);
+				res = TEE_ERROR_GENERIC;
+				goto exit;
+			}
+		}
+		prev_sec = curr_sec;
+		/* Add delay of 2 secs and then again get time from RTC */
+		mdelay(2000);
+		num_times++;
+	}
+exit:
+	return res;
+}
+
+/*
+ * Called when a pseudo TA is invoked.
+ *
+ * sess_ctx       Session Identifier
+ * cmd_id         Command ID
+ * param_types    TEE parameters
+ * params         Buffer parameters
+ */
+static TEE_Result
+invokeCommandEntryPoint(void *sess_ctx __unused, uint32_t cmd_id,
+			uint32_t param_types __unused,
+			TEE_Param params[TEE_NUM_PARAMS] __unused)
+{
+	switch (cmd_id) {
+	case PTA_CMD_I2C_RTC_RUN_TEST_SUITE:
+		return i2c_test_suite();
+	default:
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+}
+
+pseudo_ta_register(.uuid = PTA_LS_I2C_RTC_TEST_SUITE_UUID, .name = PTA_NAME,
+		   .flags = PTA_DEFAULT_FLAGS,
+		   .invoke_command_entry_point = invokeCommandEntryPoint);
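+
+/*
+ * Hypothetical client-side usage sketch: a normal-world test application
+ * would open a session to PTA_LS_I2C_RTC_TEST_SUITE_UUID and invoke
+ * PTA_CMD_I2C_RTC_RUN_TEST_SUITE with no parameters, e.g. with the
+ * GlobalPlatform client API:
+ *   TEEC_OpenSession(&ctx, &sess, &uuid, TEEC_LOGIN_PUBLIC, NULL, NULL,
+ *		      NULL);
+ *   TEEC_InvokeCommand(&sess, PTA_CMD_I2C_RTC_RUN_TEST_SUITE, &op, &orig);
+ * where op.paramTypes is TEEC_NONE for all four parameters.
+ */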
diff --git a/core/arch/arm/plat-ls/pta/sub.mk b/core/arch/arm/plat-ls/pta/sub.mk
new file mode 100644
index 000000000..5b429c7a1
--- /dev/null
+++ b/core/arch/arm/plat-ls/pta/sub.mk
@@ -0,0 +1 @@
+srcs-$(CFG_PTA_LS_I2C_RTC_TEST) += i2c_rtc_test.c
diff --git a/core/arch/arm/plat-ls/sub.mk b/core/arch/arm/plat-ls/sub.mk
index 17fcf7a0c..f81b0695a 100644
--- a/core/arch/arm/plat-ls/sub.mk
+++ b/core/arch/arm/plat-ls/sub.mk
@@ -1,3 +1,6 @@
 global-incdirs-y += .
 srcs-y += main.c
 srcs-$(CFG_ARM32_core) += plat_init.S
+
+# Build PTA
+subdirs-y += pta
diff --git a/core/drivers/crypto/caam/acipher/caam_rsa.c b/core/drivers/crypto/caam/acipher/caam_rsa.c
index 4b9b5a5c7..44f63de49 100644
--- a/core/drivers/crypto/caam/acipher/caam_rsa.c
+++ b/core/drivers/crypto/caam/acipher/caam_rsa.c
@@ -1400,7 +1400,7 @@ static TEE_Result do_caam_decrypt(struct drvcrypt_rsa_ed *rsa_data,
 	caam_dmaobj_cache_push(&msg);
 
 	/* Allocate the returned computed size when PKCS V1.5 */
-	if (operation == RSA_DECRYPT(PKCS_V1_5)) {
+	if ((operation & PROT_RSA_FMT_MASK) == PROT_RSA_FMT(PKCS_V1_5)) {
 		retstatus = caam_alloc_align_buf(&size_msg, 4);
 		if (retstatus != CAAM_NO_ERROR)
 			goto exit_decrypt;
@@ -1546,7 +1546,7 @@ static TEE_Result do_caam_decrypt(struct drvcrypt_rsa_ed *rsa_data,
 
 	caam_desc_add_word(desc, operation);
 
-	if (operation == RSA_DECRYPT(PKCS_V1_5)) {
+	if ((operation & PROT_RSA_FMT_MASK) == PROT_RSA_FMT(PKCS_V1_5)) {
 		/* Get the PPKCS1 v1.5 Message length generated */
 		caam_desc_add_word(desc,
 				   ST_NOIMM_OFF(CLASS_DECO, REG_MATH0, 4, 4));
@@ -1578,11 +1578,12 @@ static TEE_Result do_caam_decrypt(struct drvcrypt_rsa_ed *rsa_data,
 		goto exit_decrypt;
 	}
 
-	if (operation == RSA_DECRYPT(NO) &&
+	if ((operation & PROT_RSA_FMT_MASK) == PROT_RSA_FMT(NO) &&
 	    rsa_data->rsa_id == DRVCRYPT_RSA_NOPAD) {
 		rsa_data->message.length = caam_dmaobj_copy_ltrim_to_orig(&msg);
 	} else {
-		if (operation == RSA_DECRYPT(PKCS_V1_5)) {
+		if ((operation & PROT_RSA_FMT_MASK) ==
+		    PROT_RSA_FMT(PKCS_V1_5)) {
 			/* PKCS 1 v1.5 */
 			cache_operation(TEE_CACHEINVALIDATE, size_msg.data,
 					size_msg.length);
diff --git a/core/drivers/crypto/caam/ae/caam_ae.c b/core/drivers/crypto/caam/ae/caam_ae.c
new file mode 100644
index 000000000..1c76e3ed9
--- /dev/null
+++ b/core/drivers/crypto/caam/ae/caam_ae.c
@@ -0,0 +1,1050 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2024 NXP
+ */
+#include <caam_ae.h>
+#include <caam_common.h>
+#include <caam_io.h>
+#include <caam_jr.h>
+#include <caam_status.h>
+#include <caam_utils_mem.h>
+#include <caam_utils_status.h>
+#include <drvcrypt.h>
+#include <drvcrypt_authenc.h>
+#include <mm/core_memprot.h>
+#include <tee_api_defines.h>
+#include <tee_api_types.h>
+#include <tee/cache.h>
+#include <utee_defines.h>
+#include <utee_types.h>
+
+#include "local.h"
+
+#define MAX_DESC_ENTRIES 64
+
+/*
+ * Constant definitions for the AES algorithms
+ */
+static const struct cipheralg aes_alg[] = {
+#if defined(CFG_NXP_CAAM_AE_CCM_DRV)
+	[TEE_CHAIN_MODE_CCM] = {
+		.type = OP_ALGO(AES) | ALGO_AAI(AES_CCM),
+		.size_block = TEE_AES_BLOCK_SIZE,
+		.size_ctx = 7 * sizeof(uint64_t),
+		.ctx_offset = 0,
+		.def_key = { .min = 16, .max = 32, .mod = 8 },
+		.initialize = caam_ae_initialize_ccm,
+		.final = caam_ae_final_ccm,
+	},
+#endif
+#if defined(CFG_NXP_CAAM_AE_GCM_DRV)
+	[TEE_CHAIN_MODE_GCM] = {
+		.type = OP_ALGO(AES) | ALGO_AAI(AES_GCM),
+		.size_block = TEE_AES_BLOCK_SIZE,
+		.size_ctx = 8 * sizeof(uint64_t),
+		.ctx_offset = 0,
+		.def_key = { .min = 16, .max = 32, .mod = 8 },
+		.initialize = caam_ae_initialize_gcm,
+		.final = caam_ae_final_gcm,
+	},
+#endif
+};
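+
+/*
+ * In the table above, .size_ctx is the amount of CAAM class 1 context
+ * that load_context()/store_context() save and restore between update
+ * operations, and .def_key covers the AES-128/192/256 key sizes.
+ */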
+
+/*
+ * Checks if the algorithm @algo is supported and returns the
+ * local algorithm entry in the corresponding cipher array
+ */
+static const struct cipheralg *get_cipheralgo(uint32_t algo)
+{
+	unsigned int algo_id = TEE_ALG_GET_MAIN_ALG(algo);
+	unsigned int algo_md = TEE_ALG_GET_CHAIN_MODE(algo);
+	const struct cipheralg *ca = NULL;
+
+	AE_TRACE("Algo id:%" PRId32 " md:%" PRId32, algo_id, algo_md);
+
+	switch (algo_id) {
+	case TEE_MAIN_ALGO_AES:
+		if (algo_md < ARRAY_SIZE(aes_alg))
+			ca = &aes_alg[algo_md];
+		break;
+
+	default:
+		break;
+	}
+
+	if (ca && ca->type)
+		return ca;
+
+	return NULL;
+}
+
+/*
+ * Allocate the SW cipher data context
+ *
+ * @ctx   [out] Caller context variable
+ * @algo  Algorithm ID of the context
+ */
+static TEE_Result caam_ae_allocate(void **ctx, uint32_t algo)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	struct caam_ae_ctx *caam_ctx = NULL;
+	const struct cipheralg *alg = NULL;
+
+	assert(ctx);
+
+	alg = get_cipheralgo(algo);
+	if (!alg) {
+		AE_TRACE("Algorithm not implemented");
+		return TEE_ERROR_NOT_IMPLEMENTED;
+	}
+
+	caam_ctx = caam_calloc(sizeof(*caam_ctx));
+	if (!caam_ctx)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	caam_ctx->descriptor = caam_calloc_desc(MAX_DESC_ENTRIES);
+	if (!caam_ctx->descriptor) {
+		ret = TEE_ERROR_OUT_OF_MEMORY;
+		goto err;
+	}
+
+	/* Setup the Algorithm pointer */
+	caam_ctx->alg = alg;
+	/* Initialize the block buffer */
+	caam_ctx->blockbuf.max = caam_ctx->alg->size_block;
+
+	*ctx = caam_ctx;
+
+	return TEE_SUCCESS;
+err:
+	caam_free_desc(&caam_ctx->descriptor);
+	caam_free(caam_ctx);
+
+	return ret;
+}
+
+/*
+ * Free the internal cipher data context
+ *
+ * @ctx    Caller context variable or NULL
+ */
+static void caam_ae_free(void *ctx)
+{
+	struct caam_ae_ctx *caam_ctx = NULL;
+
+	assert(ctx);
+
+	caam_ctx = (struct caam_ae_ctx *)ctx;
+
+	caam_free_desc(&caam_ctx->descriptor);
+	caam_free_buf(&caam_ctx->key);
+	caam_free_buf(&caam_ctx->nonce);
+	caam_free_buf(&caam_ctx->ctx);
+	caam_free_buf(&caam_ctx->initial_ctx);
+	caam_free_buf(&caam_ctx->buf_aad.buf);
+	caam_free_buf(&caam_ctx->blockbuf.buf);
+	caam_free(caam_ctx);
+}
+
+/*
+ * Initialization of the cipher operation
+ *
+ * @dinit  Data initialization object
+ */
+static TEE_Result caam_ae_initialize(struct drvcrypt_authenc_init *dinit)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caam_ae_ctx *caam_ctx = NULL;
+
+	assert(dinit);
+
+	if (dinit->aad_len >= AAD_LENGTH_OVERFLOW)
+		return TEE_ERROR_NOT_SUPPORTED;
+
+	caam_ctx = (struct caam_ae_ctx *)dinit->ctx;
+	if (!caam_ctx)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	caam_ctx->encrypt = dinit->encrypt;
+	caam_ctx->aad_length = dinit->aad_len;
+	caam_ctx->payload_length = dinit->payload_len;
+	caam_ctx->tag_length = dinit->tag_len;
+
+	if (dinit->key.data && dinit->key.length) {
+		retstatus = caam_cpy_buf_src(&caam_ctx->key, dinit->key.data,
+					     dinit->key.length);
+		AE_TRACE("Copy key returned 0x%" PRIx32, retstatus);
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto err;
+		}
+	}
+
+	caam_ctx->blockbuf.filled = 0;
+	caam_ctx->buf_aad.filled = 0;
+
+	ret = caam_ctx->alg->initialize(dinit);
+	if (ret)
+		goto err;
+
+	return TEE_SUCCESS;
+err:
+	caam_free_buf(&caam_ctx->key);
+
+	return ret;
+}
+
+/*
+ * Update Additional Authenticated Data part of the authenc operation
+ *
+ * @dupdate  Additional Authenticated Data update object
+ */
+static TEE_Result
+caam_ae_update_aad(struct drvcrypt_authenc_update_aad *dupdate)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caam_ae_ctx *caam_ctx = NULL;
+	struct caambuf aad = {};
+
+	assert(dupdate);
+
+	caam_ctx = (struct caam_ae_ctx *)dupdate->ctx;
+	if (!caam_ctx)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (dupdate->aad.data) {
+		retstatus = caam_cpy_buf_src(&aad, dupdate->aad.data,
+					     dupdate->aad.length);
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto out;
+		}
+
+		/* Initialize the AAD buffer if not already done */
+		if (!caam_ctx->buf_aad.max)
+			caam_ctx->buf_aad.max = dupdate->aad.length;
+
+		retstatus = caam_cpy_block_src(&caam_ctx->buf_aad, &aad, 0);
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto out;
+		}
+	}
+
+	ret = TEE_SUCCESS;
+out:
+	caam_free_buf(&aad);
+	return ret;
+}
+
+/*
+ * Update of the cipher operation. Calls the update function
+ * associated with the algorithm.
+ *
+ * @dupdate  Data update object
+ */
+static TEE_Result
+caam_ae_update_payload(struct drvcrypt_authenc_update_payload *dupdate)
+{
+	struct caam_ae_ctx *caam_ctx = NULL;
+
+	assert(dupdate);
+
+	caam_ctx = (struct caam_ae_ctx *)dupdate->ctx;
+	if (!caam_ctx)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	return caam_ae_do_update(caam_ctx, &dupdate->src, &dupdate->dst, false);
+}
+
+/*
+ * Last cipher update operation. Calls the final function
+ * associated with the algorithm.
+ *
+ * @dfinal  Data final object
+ */
+static TEE_Result caam_ae_final(struct drvcrypt_authenc_final *dfinal)
+{
+	struct caam_ae_ctx *caam_ctx = NULL;
+	TEE_Result ret = TEE_ERROR_BAD_PARAMETERS;
+
+	assert(dfinal);
+
+	caam_ctx = (struct caam_ae_ctx *)dfinal->ctx;
+	if (!caam_ctx)
+		return ret;
+
+	ret = caam_ctx->alg->final(dfinal);
+
+	caam_free_buf(&caam_ctx->nonce);
+	caam_free_buf(&caam_ctx->ctx);
+	caam_free_buf(&caam_ctx->initial_ctx);
+	caam_free_buf(&caam_ctx->buf_aad.buf);
+	caam_free_buf(&caam_ctx->blockbuf.buf);
+
+	return ret;
+}
+
+/*
+ * Finalize the cipher operation
+ *
+ * @ctx    Caller context variable or NULL
+ */
+static void caam_ae_finalize(void *ctx __unused)
+{
+}
+
+/*
+ * Copy software Context
+ *
+ * @dst_ctx  [out] Reference the context destination
+ * @src_ctx  Reference the context source
+ */
+static void caam_ae_copy_state(void *dst_ctx, void *src_ctx)
+{
+	struct caam_ae_ctx *dst = dst_ctx;
+	struct caam_ae_ctx *src = src_ctx;
+
+	if (!dst || !src)
+		return;
+
+	AE_TRACE("Copy State context (%p) to (%p)", src_ctx, dst_ctx);
+
+	dst->alg = src->alg;
+	dst->encrypt = src->encrypt;
+	dst->aad_length = src->aad_length;
+	dst->tag_length = src->tag_length;
+	dst->payload_length = src->payload_length;
+	dst->buf_aad.max = src->buf_aad.max;
+	dst->do_block = src->do_block;
+
+	caam_free_buf(&dst->key);
+	caam_free_buf(&dst->nonce);
+	caam_free_buf(&dst->ctx);
+	caam_free_buf(&dst->initial_ctx);
+	caam_free_buf(&dst->buf_aad.buf);
+	caam_free_buf(&dst->blockbuf.buf);
+	dst->buf_aad.filled = 0;
+	dst->blockbuf.filled = 0;
+
+	if (src->blockbuf.filled) {
+		struct caambuf srcdata = { .data = src->blockbuf.buf.data,
+					   .length = src->blockbuf.filled };
+		caam_cpy_block_src(&dst->blockbuf, &srcdata, 0);
+	}
+
+	if (src->buf_aad.filled) {
+		struct caambuf srcdata = { .data = src->buf_aad.buf.data,
+					   .length = src->buf_aad.filled };
+		caam_cpy_block_src(&dst->buf_aad, &srcdata, 0);
+	}
+
+	if (src->key.length)
+		caam_cpy_buf_src(&dst->key, src->key.data, src->key.length);
+
+	if (src->ctx.length)
+		caam_cpy_buf_src(&dst->ctx, src->ctx.data, src->ctx.length);
+
+	if (src->initial_ctx.length)
+		caam_cpy_buf_src(&dst->initial_ctx, src->initial_ctx.data,
+				 src->initial_ctx.length);
+
+	if (src->nonce.length)
+		caam_cpy_buf_src(&dst->nonce, src->nonce.data,
+				 src->nonce.length);
+}
+
+/*
+ * Registration of the Authentication Encryption Driver
+ */
+static struct drvcrypt_authenc driver_ae = {
+	.alloc_ctx = &caam_ae_allocate,
+	.free_ctx = &caam_ae_free,
+	.init = &caam_ae_initialize,
+	.update_aad = &caam_ae_update_aad,
+	.update_payload = &caam_ae_update_payload,
+	.enc_final = &caam_ae_final,
+	.dec_final = &caam_ae_final,
+	.final = &caam_ae_finalize,
+	.copy_state = &caam_ae_copy_state,
+};
+
+/*
+ * Init descriptor with a cipher key
+ *
+ * @caam_ctx  Reference the AE cipher context
+ */
+static void init_descriptor(struct caam_ae_ctx *caam_ctx)
+{
+	uint32_t *desc = NULL;
+
+	assert(caam_ctx);
+
+	desc = caam_ctx->descriptor;
+
+	caam_desc_init(desc);
+	caam_desc_add_word(desc, DESC_HEADER(0));
+
+	/* Build the descriptor */
+	caam_desc_add_word(desc,
+			   LD_KEY_PLAIN(CLASS_1, REG, caam_ctx->key.length));
+	caam_desc_add_ptr(desc, caam_ctx->key.paddr);
+}
+
+/*
+ * Init descriptor with an initial context
+ *
+ * @caam_ctx  Reference the AE cipher context
+ */
+static void add_initial_context(struct caam_ae_ctx *caam_ctx)
+{
+	uint32_t *desc = NULL;
+	size_t length = 0;
+
+	assert(caam_ctx);
+
+	desc = caam_ctx->descriptor;
+	length = caam_ctx->initial_ctx.length;
+
+	if (length) {
+		caam_desc_add_word(desc,
+				   LD_NOIMM_OFF(CLASS_1, REG_CTX, length, 0));
+		caam_desc_add_ptr(desc, caam_ctx->initial_ctx.paddr);
+
+		/* Ensure Context register data are not in cache */
+		cache_operation(TEE_CACHECLEAN, caam_ctx->initial_ctx.data,
+				length);
+	}
+}
+
+/*
+ * Set descriptor with a saved CAAM context
+ *
+ * @caam_ctx  Reference the AE cipher context
+ */
+static void load_context(struct caam_ae_ctx *caam_ctx)
+{
+	uint32_t *desc = NULL;
+
+	assert(caam_ctx);
+
+	desc = caam_ctx->descriptor;
+
+	caam_desc_add_word(desc,
+			   LD_NOIMM_OFF(CLASS_1, REG_CTX, caam_ctx->ctx.length,
+					caam_ctx->alg->ctx_offset));
+	caam_desc_add_ptr(desc, caam_ctx->ctx.paddr);
+}
+
+/*
+ * Set descriptor to save the CAAM context
+ *
+ * @caam_ctx  Reference the AE cipher context
+ */
+static void store_context(struct caam_ae_ctx *caam_ctx)
+{
+	uint32_t *desc = NULL;
+
+	assert(caam_ctx);
+
+	desc = caam_ctx->descriptor;
+
+	/* Store the context */
+	caam_desc_add_word(desc,
+			   ST_NOIMM_OFF(CLASS_1, REG_CTX, caam_ctx->ctx.length,
+					caam_ctx->alg->ctx_offset));
+	caam_desc_add_ptr(desc, caam_ctx->ctx.paddr);
+
+	/* Ensure Context register data are not in cache */
+	cache_operation(TEE_CACHECLEAN, caam_ctx->ctx.data,
+			caam_ctx->ctx.length);
+}
+
+/*
+ * Perform the cipher operation and generate the message authentication tag
+ *
+ * @caam_ctx AE Cipher context
+ * @encrypt  Encrypt or decrypt direction
+ * @src      Source data to encrypt/decrypt
+ * @dst      [out] Destination data encrypted/decrypted
+ * @aad      Additional Authenticated data
+ */
+static enum caam_status caam_ae_do_oneshot(struct caam_ae_ctx *caam_ctx,
+					   bool encrypt, struct caamdmaobj *src,
+					   struct caamdmaobj *dst,
+					   struct caamdmaobj *aad)
+{
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caam_jobctx jobctx = {};
+	uint32_t *desc = NULL;
+
+	assert(caam_ctx);
+
+	desc = caam_ctx->descriptor;
+
+	init_descriptor(caam_ctx);
+
+	add_initial_context(caam_ctx);
+
+	AE_TRACE("Init/Final operation");
+
+	/* Operation with the direction */
+	caam_desc_add_word(desc,
+			   CIPHER_INITFINAL(caam_ctx->alg->type, encrypt));
+
+	if (!caam_ctx->ctx.data) {
+		retstatus = caam_alloc_align_buf(&caam_ctx->ctx,
+						 caam_ctx->alg->size_ctx);
+		if (retstatus)
+			return retstatus;
+	}
+
+	if (caam_ctx->nonce.data) {
+		if (!src && !aad)
+			caam_desc_add_word(desc,
+					   FIFO_LD(CLASS_1, IV, LAST_C1,
+						   caam_ctx->nonce.length));
+		else
+			caam_desc_add_word(desc,
+					   FIFO_LD(CLASS_1, IV, FLUSH,
+						   caam_ctx->nonce.length));
+		caam_desc_add_ptr(desc, caam_ctx->nonce.paddr);
+
+		/* Ensure Nonce data are not in cache */
+		cache_operation(TEE_CACHECLEAN, caam_ctx->nonce.data,
+				caam_ctx->nonce.length);
+	}
+
+	if (aad) {
+		if (!src)
+			caam_desc_fifo_load(desc, aad, CLASS_1, AAD, LAST_C1);
+		else
+			caam_desc_fifo_load(desc, aad, CLASS_1, AAD, FLUSH);
+		caam_dmaobj_cache_push(aad);
+	}
+
+	/* Load the source data if any */
+	if (src) {
+		caam_desc_fifo_load(desc, src, CLASS_1, MSG, LAST_C1);
+		caam_dmaobj_cache_push(src);
+	}
+
+	/* Store the output data if any */
+	if (dst) {
+		caam_desc_fifo_store(desc, dst, MSG_DATA);
+		caam_dmaobj_cache_push(dst);
+	}
+
+	store_context(caam_ctx);
+
+	AE_DUMPDESC(desc);
+
+	jobctx.desc = desc;
+	retstatus = caam_jr_enqueue(&jobctx, NULL);
+	if (retstatus) {
+		AE_TRACE("CAAM return 0x%08x Status 0x%08" PRIx32,
+			 retstatus, jobctx.status);
+		retstatus = CAAM_FAILURE;
+	}
+
+	/* Ensure Context register data are not in cache */
+	cache_operation(TEE_CACHEINVALIDATE, caam_ctx->ctx.data,
+			caam_ctx->ctx.length);
+
+	return retstatus;
+}
+
+/*
+ * Init cipher operation
+ *
+ * @caam_ctx AE Cipher context
+ * @encrypt  Encrypt or decrypt direction
+ * @aad      Additional Authenticated data
+ */
+static enum caam_status caam_ae_do_init(struct caam_ae_ctx *caam_ctx,
+					bool encrypt, struct caamdmaobj *aad)
+{
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caam_jobctx jobctx = {};
+	uint32_t *desc = NULL;
+
+	assert(caam_ctx);
+
+	desc = caam_ctx->descriptor;
+
+	init_descriptor(caam_ctx);
+
+	add_initial_context(caam_ctx);
+
+	AE_TRACE("Init operation");
+
+	/* Operation with the direction */
+	caam_desc_add_word(desc, CIPHER_INIT(caam_ctx->alg->type, encrypt));
+
+	if (!caam_ctx->ctx.data) {
+		retstatus = caam_alloc_align_buf(&caam_ctx->ctx,
+						 caam_ctx->alg->size_ctx);
+		if (retstatus)
+			return retstatus;
+	}
+
+	if (caam_ctx->nonce.data) {
+		if (!aad)
+			caam_desc_add_word(desc,
+					   FIFO_LD(CLASS_1, IV, LAST_C1,
+						   caam_ctx->nonce.length));
+		else
+			caam_desc_add_word(desc,
+					   FIFO_LD(CLASS_1, IV, FLUSH,
+						   caam_ctx->nonce.length));
+		caam_desc_add_ptr(desc, caam_ctx->nonce.paddr);
+
+		/* Ensure Nonce data are not in cache */
+		cache_operation(TEE_CACHECLEAN, caam_ctx->nonce.data,
+				caam_ctx->nonce.length);
+	}
+
+	if (aad) {
+		caam_desc_fifo_load(desc, aad, CLASS_1, AAD, LAST_C1);
+		caam_dmaobj_cache_push(aad);
+	} else if (!caam_ctx->nonce.data) {
+		/* Required for null aad (initialize nonce only) */
+		caam_desc_add_word(desc, FIFO_LD_IMM(CLASS_1, AAD, LAST_C1, 0));
+	}
+
+	store_context(caam_ctx);
+
+	AE_DUMPDESC(desc);
+
+	jobctx.desc = desc;
+	retstatus = caam_jr_enqueue(&jobctx, NULL);
+	if (retstatus) {
+		AE_TRACE("CAAM return 0x%08x Status 0x%08" PRIx32,
+			 retstatus, jobctx.status);
+		retstatus = CAAM_FAILURE;
+	}
+
+	/* Ensure Context register data are not in cache */
+	cache_operation(TEE_CACHEINVALIDATE, caam_ctx->ctx.data,
+			caam_ctx->ctx.length);
+
+	return retstatus;
+}
+
+/*
+ * Update the cipher operation and generate the message authentication
+ * tag on the last update
+ *
+ * @caam_ctx AE Cipher context
+ * @savectx  Save or not the context
+ * @encrypt  Encrypt or decrypt direction
+ * @src      Source data to encrypt/decrypt
+ * @dst      [out] Destination data encrypted/decrypted
+ * @final    Final AES block flag
+ */
+static enum caam_status caam_ae_do_block(struct caam_ae_ctx *caam_ctx,
+					 bool savectx, bool encrypt,
+					 struct caamdmaobj *src,
+					 struct caamdmaobj *dst, bool final)
+{
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caam_jobctx jobctx = {};
+	uint32_t *desc = NULL;
+
+	assert(caam_ctx);
+
+	desc = caam_ctx->descriptor;
+
+	if (!caam_ctx->ctx.length)
+		return CAAM_NOT_INIT;
+
+	init_descriptor(caam_ctx);
+
+	load_context(caam_ctx);
+
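+	/*
+	 * Let the algorithm-specific handler build the descriptor when it
+	 * claims the operation (e.g. the GCM counter roll-over workaround),
+	 * otherwise fall back to the generic update/final sequence below.
+	 */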
+	if (!caam_ctx->do_block ||
+	    !caam_ctx->do_block(caam_ctx, encrypt, src, dst, final)) {
+		if (final)
+			caam_desc_add_word(desc,
+					   CIPHER_FINAL(caam_ctx->alg->type,
+							encrypt));
+		else
+			caam_desc_add_word(desc,
+					   CIPHER_UPDATE(caam_ctx->alg->type,
+							 encrypt));
+
+		/* Load the source data if any */
+		if (src) {
+			caam_desc_fifo_load(desc, src, CLASS_1, MSG, LAST_C1);
+			caam_dmaobj_cache_push(src);
+		} else {
+			/*
+			 * Load a zero-length input entry so the algorithm
+			 * still starts (the FIFO LOAD sets the input data
+			 * size)
+			 */
+			caam_desc_add_word(desc,
+					   FIFO_LD(CLASS_1, MSG, LAST_C1, 0));
+			caam_desc_add_ptr(desc, 0);
+		}
+
+		/* Store the output data if any */
+		if (dst) {
+			caam_desc_fifo_store(desc, dst, MSG_DATA);
+			caam_dmaobj_cache_push(dst);
+		}
+	}
+
+	if (savectx)
+		store_context(caam_ctx);
+
+	AE_DUMPDESC(desc);
+
+	jobctx.desc = desc;
+	retstatus = caam_jr_enqueue(&jobctx, NULL);
+	if (retstatus) {
+		AE_TRACE("CAAM return 0x%08x Status 0x%08" PRIx32,
+			 retstatus, jobctx.status);
+		retstatus = CAAM_FAILURE;
+	}
+
+	/* Ensure Context register data are not in cache */
+	if (savectx)
+		cache_operation(TEE_CACHEINVALIDATE, caam_ctx->ctx.data,
+				caam_ctx->ctx.length);
+
+	return retstatus;
+}
+
+TEE_Result caam_ae_do_update(struct caam_ae_ctx *caam_ctx,
+			     struct drvcrypt_buf *src, struct drvcrypt_buf *dst,
+			     bool last)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caamdmaobj caam_src = {};
+	struct caamdmaobj caam_dst = {};
+	struct caamdmaobj caam_aad = {};
+	struct caamdmaobj *caam_aad_ptr = NULL;
+	struct caamblock trash_bck = {};
+	size_t full_size = 0;
+	size_t size_topost = 0;
+	size_t size_todo = 0;
+	size_t size_done = 0;
+	size_t size_inmade = 0;
+	size_t offset = 0;
+	bool do_init = false;
+
+	if (!caam_ctx || !src || !dst)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	AE_TRACE("Length=%zu - %s", src->length,
+		 caam_ctx->encrypt ? "Encrypt" : "Decrypt");
+
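+	/*
+	 * The CAAM context buffer is only allocated by the init or one-shot
+	 * operation, so an empty context means init has not run yet.
+	 */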
+	do_init = (caam_ctx->ctx.length == 0);
+
+	/*
+	 * According to the GP TEE API (TEE_AEUpdateAAD), the Additional
+	 * Authenticated Data is supplied before any payload update, so the
+	 * whole AAD buffer can be loaded in the Init state
+	 */
+	if (do_init && caam_ctx->buf_aad.filled) {
+		size_t aad_length = caam_ctx->buf_aad.filled;
+
+		ret = caam_dmaobj_init_input(&caam_aad,
+					     caam_ctx->buf_aad.buf.data,
+					     aad_length);
+		if (ret)
+			goto end_cipher;
+
+		ret = caam_dmaobj_prepare(&caam_aad, NULL, aad_length);
+		if (ret)
+			goto end_cipher;
+
+		ret = caam_dmaobj_sgtbuf_build(&caam_aad, &aad_length, 0,
+					       aad_length);
+		if (ret)
+			goto end_cipher;
+
+		if (aad_length != caam_ctx->buf_aad.filled) {
+			ret = TEE_ERROR_GENERIC;
+			goto end_cipher;
+		}
+
+		caam_aad_ptr = &caam_aad;
+	}
+
+	/*
+	 * Calculate the total data to be handled
+	 * which is data saved to complete the previous buffer
+	 * plus actual buffer length
+	 */
+	full_size = caam_ctx->blockbuf.filled + src->length;
+	if (!last) {
+		if (full_size < caam_ctx->alg->size_block) {
+			size_topost = src->length;
+			dst->length = 0;
+			goto end_cipher_post;
+		} else {
+			size_topost = full_size % caam_ctx->alg->size_block;
+			size_inmade = src->length - size_topost;
+			/* Total size that is a cipher block multiple */
+			size_todo = full_size - size_topost;
+		}
+	} else {
+		/* Last total size that is the remaining data */
+		size_todo = full_size;
+	}
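+
+	/*
+	 * Example (16-byte cipher block, not the last update), assuming
+	 * blockbuf.filled = 5 and src->length = 30:
+	 *   full_size   = 35
+	 *   size_topost = 35 % 16 = 3 (saved for the next update)
+	 *   size_inmade = 30 - 3 = 27
+	 *   size_todo   = 35 - 3 = 32 (a whole number of blocks)
+	 */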
+
+	AE_TRACE("FullSize %zu - posted %zu - todo %zu", full_size,
+		 size_topost, size_todo);
+
+	if (!size_todo) {
+		if (!last) {
+			ret = TEE_SUCCESS;
+			goto end_cipher_post;
+		} else if (do_init) {
+			retstatus = caam_ae_do_oneshot(caam_ctx,
+						       caam_ctx->encrypt, NULL,
+						       NULL, caam_aad_ptr);
+
+			ret = caam_status_to_tee_result(retstatus);
+
+			/* Nothing to post on last update operation */
+			goto end_cipher;
+		} else {
+			retstatus = caam_ae_do_block(caam_ctx, true,
+						     caam_ctx->encrypt, NULL,
+						     NULL, true);
+
+			ret = caam_status_to_tee_result(retstatus);
+
+			/* Nothing to post on last update operation */
+			goto end_cipher;
+		}
+	}
+
+	if (src->length) {
+		ret = caam_dmaobj_init_input(&caam_src, src->data, src->length);
+		if (ret)
+			goto end_cipher;
+	} else {
+		/* Init the buffer with saved data */
+		ret = caam_dmaobj_init_input(&caam_src,
+					     caam_ctx->blockbuf.buf.data,
+					     caam_ctx->blockbuf.filled);
+		if (ret)
+			goto end_cipher;
+
+		caam_ctx->blockbuf.filled = 0;
+	}
+
+	ret = caam_dmaobj_init_output(&caam_dst, dst->data, dst->length,
+				      size_todo);
+	if (ret)
+		goto end_cipher;
+
+	ret = caam_dmaobj_prepare(&caam_src, &caam_dst, size_todo);
+	if (ret)
+		goto end_cipher;
+
+	/* Check if there is some data saved to complete the buffer */
+	if (caam_ctx->blockbuf.filled) {
+		ret = caam_dmaobj_add_first_block(&caam_src,
+						  &caam_ctx->blockbuf);
+		if (ret)
+			goto end_cipher;
+
+		ret = caam_dmaobj_add_first_block(&caam_dst,
+						  &caam_ctx->blockbuf);
+		if (ret)
+			goto end_cipher;
+
+		caam_ctx->blockbuf.filled = 0;
+	}
+
+	if (do_init) {
+		retstatus = caam_ae_do_init(caam_ctx, caam_ctx->encrypt,
+					    caam_aad_ptr);
+
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto end_cipher;
+		}
+		do_init = false;
+	}
+
+	size_done = size_todo;
+	dst->length = 0;
+	for (offset = 0; size_todo;
+	     offset += size_done, size_todo -= size_done) {
+		AE_TRACE("Do input %zu bytes, offset %zu", size_done, offset);
+
+		ret = caam_dmaobj_sgtbuf_inout_build(&caam_src, &caam_dst,
+						     &size_done, offset,
+						     size_todo);
+		if (ret)
+			goto end_cipher;
+
+		/* Is it the last update and the last block? */
+		if (last && size_todo == size_done)
+			retstatus = caam_ae_do_block(caam_ctx, true,
+						     caam_ctx->encrypt,
+						     &caam_src, &caam_dst,
+						     true);
+		else
+			retstatus = caam_ae_do_block(caam_ctx, true,
+						     caam_ctx->encrypt,
+						     &caam_src, &caam_dst,
+						     false);
+
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto end_cipher;
+		}
+
+		dst->length += caam_dmaobj_copy_to_orig(&caam_dst);
+	}
+
+end_cipher_post:
+	if (size_topost) {
+		/*
+		 * Save the input data in the block buffer for next operation
+		 * and prepare the source DMA Object with the overall saved
+		 * data to generate destination bytes.
+		 */
+		struct caambuf cpysrc = { .data = src->data,
+					  .length = src->length };
+
+		caam_dmaobj_free(&caam_src);
+		caam_dmaobj_free(&caam_dst);
+		AE_TRACE("Save input data %zu bytes (done %zu) - off %zu",
+			 size_topost, size_inmade, offset);
+
+		size_todo = size_topost + caam_ctx->blockbuf.filled;
+
+		/*
+		 * Prepare the destination DMA Object:
+		 *  - Use the given destination parameter bytes to return
+		 *  - If the previous operation saved data, use a trash
+		 *    buffer for that part of the operation and discard the
+		 *    unneeded data.
+		 */
+		ret = caam_dmaobj_init_output(&caam_dst,
+					      dst->data + size_inmade,
+					      size_topost, size_topost);
+		if (ret)
+			goto end_cipher;
+
+		ret = caam_dmaobj_prepare(NULL, &caam_dst,
+					  caam_ctx->alg->size_block);
+		if (ret)
+			goto end_cipher;
+
+		if (caam_ctx->blockbuf.filled) {
+			/*
+			 * Because there are some bytes to trash, use
+			 * a block buffer that will be added to the
+			 * destination SGT/Buffer structure to do the
+			 * cipher operation.
+			 */
+			retstatus = caam_alloc_align_buf(&trash_bck.buf,
+							 caam_ctx->blockbuf.filled);
+			if (retstatus != CAAM_NO_ERROR) {
+				AE_TRACE("Allocation Trash Block error");
+				ret = caam_status_to_tee_result(retstatus);
+				goto end_cipher;
+			}
+			trash_bck.filled = caam_ctx->blockbuf.filled;
+
+			ret = caam_dmaobj_add_first_block(&caam_dst,
+							  &trash_bck);
+			if (ret)
+				goto end_cipher;
+		}
+
+		retstatus = caam_cpy_block_src(&caam_ctx->blockbuf, &cpysrc,
+					       size_inmade);
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto end_cipher;
+		}
+
+		ret = caam_dmaobj_init_input(&caam_src,
+					     caam_ctx->blockbuf.buf.data,
+					     caam_ctx->blockbuf.filled);
+		if (ret)
+			goto end_cipher;
+
+		ret = caam_dmaobj_prepare(&caam_src, NULL,
+					  caam_ctx->alg->size_block);
+		if (ret)
+			goto end_cipher;
+
+		/*
+		 * Build input and output DMA Object with the same size.
+		 */
+		size_done = size_todo;
+		ret = caam_dmaobj_sgtbuf_inout_build(&caam_src, &caam_dst,
+						     &size_done, 0, size_todo);
+		if (ret)
+			goto end_cipher;
+
+		if (size_todo != size_done) {
+			AE_TRACE("Invalid end streaming size %zu vs %zu",
+				 size_done, size_todo);
+			ret = TEE_ERROR_GENERIC;
+			goto end_cipher;
+		}
+
+		if (do_init) {
+			retstatus = caam_ae_do_init(caam_ctx, caam_ctx->encrypt,
+						    caam_aad_ptr);
+
+			if (retstatus) {
+				ret = caam_status_to_tee_result(retstatus);
+				goto end_cipher;
+			}
+		}
+
+		retstatus = caam_ae_do_block(caam_ctx, false, caam_ctx->encrypt,
+					     &caam_src, &caam_dst, false);
+
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto end_cipher;
+		}
+
+		dst->length += caam_dmaobj_copy_to_orig(&caam_dst);
+
+		AE_DUMPBUF("Source", caam_ctx->blockbuf.buf.data,
+			   caam_ctx->blockbuf.filled);
+		AE_DUMPBUF("Result", dst->data + size_inmade, size_topost);
+	}
+
+	ret = TEE_SUCCESS;
+
+end_cipher:
+	caam_dmaobj_free(&caam_src);
+	caam_dmaobj_free(&caam_dst);
+	caam_dmaobj_free(&caam_aad);
+
+	/* Free Trash block buffer */
+	caam_free_buf(&trash_bck.buf);
+
+	return ret;
+}
+
+/*
+ * Initialize the authenticated encryption cipher module
+ *
+ * @ctrl_addr   Controller base address
+ */
+enum caam_status caam_ae_init(vaddr_t ctrl_addr __unused)
+{
+	enum caam_status retstatus = CAAM_FAILURE;
+
+	if (drvcrypt_register_authenc(&driver_ae) == TEE_SUCCESS)
+		retstatus = CAAM_NO_ERROR;
+
+	return retstatus;
+}
diff --git a/core/drivers/crypto/caam/ae/caam_ae_ccm.c b/core/drivers/crypto/caam/ae/caam_ae_ccm.c
new file mode 100644
index 000000000..38286a942
--- /dev/null
+++ b/core/drivers/crypto/caam/ae/caam_ae_ccm.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2024 NXP
+ *
+ * Implementation of Cipher CCM functions
+ */
+#include <caam_common.h>
+#include <caam_utils_mem.h>
+#include <caam_utils_status.h>
+#include <drvcrypt_math.h>
+#include <string.h>
+#include <string_ext.h>
+#include <utee_defines.h>
+
+#include "local.h"
+
+/* Size in bytes of the encoded AAD length field, as in SP800-38C */
+#define AAD_SIZE_LEN 2
+
+/* Maximum nonce length */
+#define AES_CCM_MAX_NONCE_LEN 15
+
+/* Tag length */
+#define AES_CCM_MIN_TAG_LEN 4
+#define AES_CCM_MAX_TAG_LEN 16
+
+/* Adata Flag */
+#define BM_B0_ADATA_PRESENCE BIT32(6)
+
+/* B0 Tag length */
+#define BS_B0_TAG_LENGTH 3
+#define BM_B0_TAG_LENGTH SHIFT_U32(0x7, BS_B0_TAG_LENGTH)
+#define B0_TAG_LENGTH(x) \
+	(SHIFT_U32(((x) - 2) / 2, BS_B0_TAG_LENGTH) & BM_B0_TAG_LENGTH)
+
+/* B0 Payload size length */
+#define BS_B0_Q_LENGTH 0
+#define BM_B0_Q_LENGTH SHIFT_U32(0x7, BS_B0_Q_LENGTH)
+#define B0_Q_LENGTH(x) (SHIFT_U32((x) - 1, BS_B0_Q_LENGTH) & BM_B0_Q_LENGTH)
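+
+/*
+ * Worked example of the B0 flags byte (SP800-38C A.2.1), assuming a
+ * 13-byte nonce (hence q = 2), a 16-byte tag and some AAD present:
+ *   b0[0] = BM_B0_ADATA_PRESENCE | B0_TAG_LENGTH(16) | B0_Q_LENGTH(2)
+ *         = 0x40 | 0x38 | 0x01 = 0x79
+ */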
+
+/*
+ * Initialize AES CCM operation context
+ *
+ * @caam_ctx AE Cipher context
+ * @dinit    Data initialization object
+ */
+static TEE_Result caam_ae_ccm_init_ctx(struct caam_ae_ctx *caam_ctx,
+				       struct drvcrypt_authenc_init *dinit)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caambuf aad = {};
+	uint8_t *b0 = NULL;
+	uint8_t *ctr0 = NULL;
+	size_t q = 0;
+	size_t payload_len = 0;
+	size_t i = 0;
+
+	assert(caam_ctx && dinit);
+
+	if (dinit->nonce.length > AES_CCM_MAX_NONCE_LEN)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	/* The tag length must be 4, 6, 8, 10, 12, 14 or 16 bytes */
+	if (caam_ctx->tag_length < 4 || caam_ctx->tag_length > 16 ||
+	    caam_ctx->tag_length % 2 != 0)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	payload_len = caam_ctx->payload_length;
+
+	/*
+	 * Before the AE operation, the CAAM context register must be
+	 * filled with B0 and CTR0.
+	 */
+	b0 = caam_ctx->initial_ctx.data;
+	ctr0 = caam_ctx->initial_ctx.data + TEE_AES_BLOCK_SIZE;
+
+	/*
+	 * Set B0 initial value
+	 * As specified in SP 800-38C, B0 contains the flags, the data
+	 * length (the whole operation length in the init/update/final
+	 * case) and the nonce
+	 */
+	memset(b0, 0, TEE_AES_BLOCK_SIZE);
+
+	/* Available length for the data size length field */
+	q = AES_CCM_MAX_NONCE_LEN - dinit->nonce.length;
+
+	/* Flags value in b0[0] */
+	b0[0] = B0_TAG_LENGTH(caam_ctx->tag_length) | B0_Q_LENGTH(q);
+	if (caam_ctx->aad_length)
+		b0[0] |= BM_B0_ADATA_PRESENCE;
+
+	/* Nonce value in b0[1..AES_CCM_MAX_NONCE_LEN] */
+	memcpy(&b0[1], dinit->nonce.data, dinit->nonce.length);
+
+	/*
+	 * Payload length as defined in SP800-38C,
+	 * A.2.1 Formatting of the Control Information and the Nonce
+	 * Payload length (i.e. Q) is stored in big-endian fashion.
+	 */
+	for (i = AES_CCM_MAX_NONCE_LEN; i >= dinit->nonce.length + 1; i--,
+	     payload_len >>= 8)
+		b0[i] = payload_len & 0xFF;
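+	/*
+	 * For instance, with a 13-byte nonce (q = 2) the loop above fills
+	 * b0[14..15] with the payload length, most significant byte first.
+	 */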
+
+	/* Add AAD size to Adata */
+	if (caam_ctx->aad_length > 0) {
+		if (caam_ctx->aad_length >= AAD_LENGTH_OVERFLOW)
+			return TEE_ERROR_NOT_SUPPORTED;
+
+		retstatus = caam_calloc_align_buf(&aad, AAD_SIZE_LEN);
+		if (retstatus)
+			return caam_status_to_tee_result(retstatus);
+
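+		/*
+		 * Encode the AAD length on two bytes, most significant byte
+		 * first (e.g. 0x1234 becomes { 0x12, 0x34 }), as in
+		 * SP800-38C A.2.2, and prepend it to the AAD stream.
+		 */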
+		aad.data[0] = (caam_ctx->aad_length & GENMASK_32(15, 8)) >> 8;
+		aad.data[1] = caam_ctx->aad_length & GENMASK_32(7, 0);
+		retstatus = caam_cpy_block_src(&caam_ctx->buf_aad, &aad, 0);
+		if (retstatus) {
+			ret = caam_status_to_tee_result(retstatus);
+			goto out;
+		}
+	}
+
+	/*
+	 * Set CTR0 initial value
+	 * Ctr0 initial value (specification SP 800-38C) contains flags
+	 * and nonce
+	 */
+	memset(ctr0, 0, TEE_AES_BLOCK_SIZE);
+
+	/* Flags value in ctr0[0] */
+	ctr0[0] = B0_Q_LENGTH(q);
+
+	/* Nonce value in ctr0[1..AES_CCM_MAX_NONCE_LEN] */
+	memcpy(&ctr0[1], &b0[1], dinit->nonce.length);
+
+	ret = TEE_SUCCESS;
+out:
+	caam_free_buf(&aad);
+	return ret;
+}
+
+TEE_Result caam_ae_initialize_ccm(struct drvcrypt_authenc_init *dinit)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caam_ae_ctx *caam_ctx = NULL;
+
+	if (!dinit || !dinit->ctx)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	caam_ctx = dinit->ctx;
+
+	if (caam_ctx->tag_length < AES_CCM_MIN_TAG_LEN ||
+	    caam_ctx->tag_length > AES_CCM_MAX_TAG_LEN)
+		return TEE_ERROR_NOT_SUPPORTED;
+
+	/* Allocate initial B0 and CTR0 input */
+	retstatus = caam_alloc_align_buf(&caam_ctx->initial_ctx,
+					 caam_ctx->alg->size_ctx);
+	if (retstatus)
+		return caam_status_to_tee_result(retstatus);
+
+	/* Initialize the AAD buffer */
+	caam_ctx->buf_aad.max = dinit->aad_len + AAD_SIZE_LEN;
+
+	ret = caam_ae_ccm_init_ctx(caam_ctx, dinit);
+	if (ret)
+		goto err;
+
+	return TEE_SUCCESS;
+err:
+	caam_free_buf(&caam_ctx->initial_ctx);
+
+	return ret;
+}
+
+TEE_Result caam_ae_final_ccm(struct drvcrypt_authenc_final *dfinal)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	struct caam_ae_ctx *caam_ctx = NULL;
+	uint8_t *encrypted_tag = NULL;
+	struct drvcrypt_mod_op mod_op = { };
+
+	if (!dfinal || !dfinal->ctx)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	caam_ctx = dfinal->ctx;
+
+	ret = caam_ae_do_update(caam_ctx, &dfinal->src, &dfinal->dst, true);
+	if (ret)
+		return ret;
+
+	if (caam_ctx->tag_length) {
+		if (dfinal->tag.length < caam_ctx->tag_length)
+			return TEE_ERROR_SHORT_BUFFER;
+
+		if (caam_ctx->encrypt) {
+			encrypted_tag = caam_ctx->ctx.data +
+					(2 * AES_CCM_MAX_TAG_LEN);
+
+			memcpy(dfinal->tag.data, encrypted_tag,
+			       caam_ctx->tag_length);
+			dfinal->tag.length = caam_ctx->tag_length;
+		} else {
+			encrypted_tag = caam_ctx->ctx.data;
+
+			mod_op.n.length = caam_ctx->tag_length;
+			mod_op.a.data = encrypted_tag;
+			mod_op.a.length = caam_ctx->tag_length;
+			mod_op.b.data = encrypted_tag +
+					(2 * AES_CCM_MAX_TAG_LEN);
+			mod_op.b.length = caam_ctx->tag_length;
+			mod_op.result.data = encrypted_tag;
+			mod_op.result.length = caam_ctx->tag_length;
+
+			ret = drvcrypt_xor_mod_n(&mod_op);
+			if (ret)
+				return ret;
+
+			if (consttime_memcmp(dfinal->tag.data, encrypted_tag,
+					     caam_ctx->tag_length))
+				return TEE_ERROR_MAC_INVALID;
+		}
+	}
+
+	return TEE_SUCCESS;
+}
diff --git a/core/drivers/crypto/caam/ae/caam_ae_gcm.c b/core/drivers/crypto/caam/ae/caam_ae_gcm.c
new file mode 100644
index 000000000..f6e00e303
--- /dev/null
+++ b/core/drivers/crypto/caam/ae/caam_ae_gcm.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2024 NXP
+ *
+ * Implementation of Cipher GCM functions
+ */
+#include <caam_common.h>
+#include <caam_desc_ccb_defines.h>
+#include <caam_utils_mem.h>
+#include <caam_utils_status.h>
+#include <stdint.h>
+#include <string.h>
+#include <string_ext.h>
+#include <utee_defines.h>
+
+#include "local.h"
+
+/*
+ * Default GCM nonce length
+ * The CAAM errata workaround is applied when the nonce size differs from it
+ */
+#define AES_GCM_DEFAULT_NONCE_LEN 12
+
+/*
+ * Context format in GCM mode
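+ * (the layout mirrors the beginning of the CAAM AES-GCM context register)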
+ */
+struct gcm_caam_ctx_layout {
+	/*
+	 * 128-bit MAC value
+	 */
+	uint32_t mac[4];
+	/*
+	 * 128-bit current counter (Ctri/Yi) value
+	 */
+	uint32_t yi[4];
+	/*
+	 * 128-bit initial counter (Ctr0/Y0) value
+	 */
+	uint32_t y0[4];
+};
+
+/*
+ * Update of the cipher operation on complete blocks; only the last
+ * block may be a partial block.
+ *
+ * @caam_ctx AE Cipher context
+ * @encrypt  Encrypt or decrypt direction
+ * @src      Source data to encrypt/decrypt
+ * @dst      [out] Destination data encrypted/decrypted
+ * @final    Last block flag
+ */
+static bool caam_ae_do_block_gcm(struct caam_ae_ctx *caam_ctx, bool encrypt,
+				 struct caamdmaobj *src, struct caamdmaobj *dst,
+				 bool final)
+{
+	/*
+	 * CAAM Errata:
+	 * When running GCM with an IV that is not 12 bytes (96 bits),
+	 * it is possible to "roll over" the 32-bit counter value.
+	 * When this occurs (unless the counter starts at -1), the AES
+	 * operation generates an incorrect MAC; this happens even when -1
+	 * is used as the counter for the last block. The problem is that
+	 * the 32-bit counter overflows into the h value, corrupting the
+	 * MAC.
+	 * To reliably work around this issue, the AES operation must be
+	 * stopped after initialization to read the counter value and see
+	 * whether/when it will roll over. Then, before the offending block
+	 * is processed, the context needs to be saved. That one block gets
+	 * processed twice: GCM, restore the MAC, GMAC over its ciphertext,
+	 * then patch up the message and AAD lengths, and carry on as
+	 * normal.
+	 */
+	uint32_t *desc = NULL;
+	struct gcm_caam_ctx_layout ctx = {};
+	size_t input_length = 0;
+	uint32_t processed_blocks = 0;
+	uint32_t counter_value = 0;
+	uint32_t corrupted_block_size = 0;
+	uint32_t yi_1s_complement = 0;
+	uint32_t remaining_len = 0;
+
+	if (!caam_ctx)
+		return false;
+
+	desc = caam_ctx->descriptor;
+
+	/*
+	 *  for encrypt:
+	 *  1) Run GCM until we get to the block which will
+	 *     roll over the counter.
+	 *  2) Save the current ghash value
+	 *  3) Encrypt that one block (creating a bad hash value)
+	 *  4) Restore the hash value
+	 *  5) Save current AAD len
+	 *  6) Run ciphertext of the block in as AAD
+	 *  7) Restore the AAD len
+	 *  8) Run GCM on the rest of the message
+	 *  9) Compute and store the MAC/tag
+	 *
+	 *  for decrypt:
+	 *  1) Run GCM until we get to the block which will
+	 *     roll over the counter.
+	 *  2) Save the current ghash value
+	 *  3) Decrypt that one block (creating a bad hash value)
+	 *  4) Restore the hash value
+	 *  5) Save current AAD len
+	 *  6) Run ciphertext of the block in as AAD
+	 *  7) Restore the AAD len
+	 *  8) Run GCM on the rest of the message
+	 *  9) Compute and store the MAC/tag
+	 */
+
+	if (!src || src->orig.length == 0 ||
+	    caam_ctx->nonce.length == AES_GCM_DEFAULT_NONCE_LEN)
+		return false;
+
+	memcpy(&ctx, caam_ctx->ctx.data, sizeof(struct gcm_caam_ctx_layout));
+	processed_blocks = TEE_U32_BSWAP(ctx.yi[3]);
+	input_length = src->orig.length;
+	counter_value = processed_blocks +
+		       ((input_length + 15) >> 4); /* roundup input_length */
+
+	/* No workaround needed if the 32-bit counter does not roll over */
+	if (counter_value >= processed_blocks)
+		return false;
+
+	assert(dst);
+
+	yi_1s_complement = (UINT32_MAX - processed_blocks) << 4;
+	if ((yi_1s_complement + TEE_AES_BLOCK_SIZE) > input_length)
+		corrupted_block_size = (input_length - yi_1s_complement);
+	else
+		corrupted_block_size = TEE_AES_BLOCK_SIZE;
+	remaining_len = input_length - (yi_1s_complement +
+			corrupted_block_size);
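+
+	/*
+	 * Worked example, assuming processed_blocks = 0xFFFFFFFE and a
+	 * 40-byte input: counter_value wraps to 1 (< processed_blocks),
+	 * yi_1s_complement = 16 bytes can still be processed normally,
+	 * the block rolling the counter over is 16 bytes
+	 * (corrupted_block_size) and remaining_len = 8 bytes.
+	 */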
+
+	caam_desc_seq_out(desc, dst);
+	caam_dmaobj_cache_push(dst);
+
+	caam_desc_seq_in(desc, src);
+	caam_dmaobj_cache_push(src);
+
+	/* operation: cls1-op aes gcm update enc/dec */
+	caam_desc_add_word(desc, CIPHER_UPDATE(caam_ctx->alg->type, encrypt));
+
+	caam_desc_add_word(desc, FIFO_LD_SEQ(MSG, 0) | FIFO_STORE_EXT |
+					 CMD_CLASS(CLASS_1) |
+					 FIFO_LOAD_ACTION(LAST_C1));
+	caam_desc_add_word(desc, yi_1s_complement);
+
+	caam_desc_add_word(desc, FIFO_ST_SEQ(MSG_DATA, 0) | FIFO_STORE_EXT);
+	caam_desc_add_word(desc, yi_1s_complement);
+
+	/* jump: class1-done all-match[] always-jump offset=[01] local->[15] */
+	caam_desc_add_word(desc,
+			   JUMP_C1_LOCAL(ALL_COND_TRUE, JMP_COND(NONE), 1));
+
+	/*
+	 * move: class1-ctx+0 -> math2, len=TEE_AES_BLOCK_SIZE wait
+	 * Save the current ghash value
+	 */
+	caam_desc_add_word(desc, MOVE_WAIT(C1_CTX_REG, MATH_REG2, 0,
+					   TEE_AES_BLOCK_SIZE));
+
+	/*
+	 * ld: ind-clrw len=4 offs=0 imm
+	 *     clrw: clr_c1mode clr_c1datas reset_cls1_done reset_cls1_cha
+	 *	     clr_c2_ctx
+	 */
+	caam_desc_add_word(desc, LD_IMM(CLASS_NO, REG_CLEAR_WRITTEN, 4));
+	caam_desc_add_word(desc, CLR_WR_RST_C1_MDE | CLR_WR_RST_C1_DSZ |
+					 CLR_WR_RST_C1_CHA | CLR_WR_RST_C1_DNE |
+					 CLR_WR_RST_C2_CTX);
+
+	/*
+	 * Encrypt that one block (creating a bad hash value)
+	 * operation: cls1-op aes gcm update enc/dec
+	 */
+	caam_desc_add_word(desc, CIPHER_UPDATE(caam_ctx->alg->type, encrypt));
+
+	if (encrypt) {
+		/* seqfifold: class1 msg-last1 len=corrupted_Block_Size */
+		caam_desc_add_word(desc,
+				   FIFO_LD_SEQ(MSG, corrupted_block_size) |
+					   CMD_CLASS(CLASS_1) |
+					   FIFO_LOAD_ACTION(LAST_C1));
+
+		/* move: ofifo -> class2-ctx+0, len=corrupted_Block_Size wait */
+		caam_desc_add_word(desc, MOVE_WAIT(OFIFO, C2_CTX_REG, 0,
+						   corrupted_block_size));
+
+		/* seqstr: ccb2 ctx len=vseqoutsz offs=0 */
+		caam_desc_add_word(desc, ST_NOIMM_SEQ(CLASS_2, REG_CTX,
+						      corrupted_block_size));
+	} else {
+		/* seqfifold: both msg-last2-last1 len=corrupted_Block_Size */
+		caam_desc_add_word(desc,
+				   FIFO_LD_SEQ(MSG, corrupted_block_size) |
+					   CMD_CLASS(CLASS_DECO) |
+					   FIFO_LOAD_ACTION(LAST_C1) |
+					   FIFO_LOAD_ACTION(LAST_C2));
+
+		/*
+		 * move: class2-alnblk -> class2-ctx+0,
+		 *	 len=corrupted_Block_Size (aux_ms)
+		 */
+		caam_desc_add_word(desc, MOVE(DECO_ALIGN, C2_CTX_REG, 0,
+					      corrupted_block_size) |
+					      MOVE_AUX(0x2));
+
+		/* seqfifostr: msg len=vseqoutsz */
+		caam_desc_add_word(desc,
+				   FIFO_ST_SEQ(MSG_DATA, corrupted_block_size));
+	}
+
+	/* jump: class1-done all-match[] always-jump offset=[01] local->[23] */
+	caam_desc_add_word(desc,
+			   JUMP_C1_LOCAL(ALL_COND_TRUE, JMP_COND(NONE), 1));
+
+	/*
+	 * Restore the hash value
+	 * move: math2 -> class1-ctx+0, len=TEE_AES_BLOCK_SIZE wait
+	 */
+	caam_desc_add_word(desc, MOVE_WAIT(MATH_REG2, C1_CTX_REG, 0,
+					   TEE_AES_BLOCK_SIZE));
+
+	/*
+	 * ld: ind-clrw len=4 offs=0 imm
+	 *     clrw: clr_c1mode clr_c1datas reset_cls1_done reset_cls1_cha
+	 */
+	caam_desc_add_word(desc, LD_IMM(CLASS_NO, REG_CLEAR_WRITTEN, 4));
+	caam_desc_add_word(desc, CLR_WR_RST_C1_MDE | CLR_WR_RST_C1_DSZ |
+					 CLR_WR_RST_C1_CHA | CLR_WR_RST_C1_DNE);
+
+	/*
+	 * Save current AAD len
+	 * move: class1-ctx+48 -> math2, len=8 wait
+	 */
+	caam_desc_add_word(desc, MOVE_WAIT(C1_CTX_REG, MATH_REG2, 48, 8));
+
+	/*
+	 * Run ciphertext of the block in as AAD
+	 * move: class2-ctx+0 -> ififo, len=corrupted_Block_Size
+	 */
+	caam_desc_add_word(desc,
+			   MOVE(C2_CTX_REG, IFIFO, 0, corrupted_block_size));
+
+	/*
+	 * ld: ind-nfsl len=4 offs=0 imm
+	 * <nfifo_entry: ififo->class1 type=aad/pka1 lc1 len=16>
+	 */
+	caam_desc_add_word(desc, LD_IMM(CLASS_NO, REG_NFIFO_n_SIZE,
+					sizeof(uint32_t)));
+	caam_desc_add_word(desc, NFIFO_NOPAD(C1, NFIFO_LC1, IFIFO, AAD,
+					     corrupted_block_size));
+
+	/* operation: cls1-op aes gcm update enc/dec */
+	caam_desc_add_word(desc, CIPHER_UPDATE(caam_ctx->alg->type, encrypt));
+
+	/* jump: class1-done all-match[] always-jump offset=[01] local->[32] */
+	caam_desc_add_word(desc,
+			   JUMP_C1_LOCAL(ALL_COND_TRUE, JMP_COND(NONE), 1));
+
+	/*
+	 * Restore the AAD len
+	 * move: math2 -> class1-ctx+48, len=8 wait
+	 */
+	caam_desc_add_word(desc, MOVE_WAIT(MATH_REG2, C1_CTX_REG, 48, 8));
+
+	/*
+	 * Run GCM on the rest of the message
+	 * ld: ind-clrw len=4 offs=0 imm
+	 *     clrw: clr_c1mode clr_c1datas reset_cls1_done reset_cls1_cha
+	 */
+	caam_desc_add_word(desc, LD_IMM(CLASS_NO, REG_CLEAR_WRITTEN, 4));
+	caam_desc_add_word(desc, CLR_WR_RST_C1_MDE | CLR_WR_RST_C1_DSZ |
+					 CLR_WR_RST_C1_CHA | CLR_WR_RST_C1_DNE);
+
+	if (final)
+		caam_desc_add_word(desc,
+				   CIPHER_FINAL(caam_ctx->alg->type, encrypt));
+	else
+		caam_desc_add_word(desc,
+				   CIPHER_UPDATE(caam_ctx->alg->type, encrypt));
+	/* ptr incremented by max. 7 */
+	caam_desc_add_word(desc, FIFO_LD_SEQ(MSG, 0) | FIFO_STORE_EXT |
+					 CMD_CLASS(CLASS_1) |
+					 FIFO_LOAD_ACTION(LAST_C1));
+	caam_desc_add_word(desc, remaining_len);
+
+	caam_desc_add_word(desc, FIFO_ST_SEQ(MSG_DATA, 0) | FIFO_STORE_EXT);
+	caam_desc_add_word(desc, remaining_len);
+
+	return true;
+}
+
+TEE_Result caam_ae_initialize_gcm(struct drvcrypt_authenc_init *dinit)
+{
+	enum caam_status retstatus = CAAM_FAILURE;
+	struct caam_ae_ctx *caam_ctx = NULL;
+
+	if (!dinit || !dinit->ctx)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	caam_ctx = dinit->ctx;
+
+	if (dinit->nonce.data && dinit->nonce.length) {
+		retstatus = caam_cpy_buf_src(&caam_ctx->nonce,
+					     dinit->nonce.data,
+					     dinit->nonce.length);
+		AE_TRACE("Copy Nonce returned 0x%" PRIx32, retstatus);
+		if (retstatus)
+			return caam_status_to_tee_result(retstatus);
+	}
+
+	caam_ctx->do_block = caam_ae_do_block_gcm;
+
+	/* Initialize the AAD buffer */
+	caam_ctx->buf_aad.max = dinit->aad_len;
+
+	return TEE_SUCCESS;
+}
+
+TEE_Result caam_ae_final_gcm(struct drvcrypt_authenc_final *dfinal)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	struct caam_ae_ctx *caam_ctx = NULL;
+
+	if (!dfinal)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	caam_ctx = dfinal->ctx;
+
+	ret = caam_ae_do_update(caam_ctx, &dfinal->src, &dfinal->dst, true);
+	if (ret)
+		return ret;
+
+	if (caam_ctx->tag_length) {
+		if (dfinal->tag.length < caam_ctx->tag_length)
+			return TEE_ERROR_SHORT_BUFFER;
+
+		if (caam_ctx->encrypt) {
+			memcpy(dfinal->tag.data, caam_ctx->ctx.data,
+			       caam_ctx->tag_length);
+			dfinal->tag.length = caam_ctx->tag_length;
+		} else {
+			if (consttime_memcmp(dfinal->tag.data,
+					     caam_ctx->ctx.data,
+					     caam_ctx->tag_length))
+				return TEE_ERROR_MAC_INVALID;
+		}
+	}
+
+	return TEE_SUCCESS;
+}
diff --git a/core/drivers/crypto/caam/ae/local.h b/core/drivers/crypto/caam/ae/local.h
new file mode 100644
index 000000000..042a3cf2d
--- /dev/null
+++ b/core/drivers/crypto/caam/ae/local.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2024 NXP
+ */
+#ifndef __LOCAL_H__
+#define __LOCAL_H__
+
+#include <caam_utils_dmaobj.h>
+#include <drvcrypt.h>
+#include <drvcrypt_authenc.h>
+
+/* AAD length limit (2^16 - 2^8) for the two-byte length encoding */
+#define AAD_LENGTH_OVERFLOW 0xFF00
+
+/*
+ * Cipher Algorithm definition
+ */
+struct cipheralg {
+	uint32_t type; /* Algo type for operation */
+	uint8_t size_block; /* Computing block size */
+	uint8_t size_ctx; /* CAAM Context Register size */
+	uint8_t ctx_offset; /* CAAM Context Register offset */
+	struct caamdefkey def_key; /* Define accepted key size */
+
+	TEE_Result (*initialize)(struct drvcrypt_authenc_init *dinit);
+	TEE_Result (*final)(struct drvcrypt_authenc_final *dfinal);
+};
+
+struct caam_ae_ctx {
+	uint32_t *descriptor;       /* Job descriptor */
+
+	size_t tag_length;          /* Hash tag length */
+	size_t aad_length;          /* Additional data length */
+	size_t payload_length;      /* Data length */
+
+	bool encrypt;               /* Encrypt direction */
+
+	struct caambuf key;         /* Cipher key */
+	struct caambuf initial_ctx; /* Initial CCM context */
+	struct caambuf ctx;         /* Saved context for multi-part update */
+	struct caambuf nonce;       /* Initial GCM Nonce value */
+
+	struct caamblock buf_aad;   /* Additional Data buffer if needed */
+	struct caamblock blockbuf;  /* Temporary Block buffer */
+
+	bool (*do_block)(struct caam_ae_ctx *caam_ctx, bool encrypt,
+			 struct caamdmaobj *src, struct caamdmaobj *dst,
+			 bool final);
+
+	const struct cipheralg *alg; /* Reference to the algo constants */
+};
+
+/*
+ * Update of the Authenticated Encryption Operation.
+ *
+ * @ctx      AE Cipher context
+ * @src      Source data to encrypt/decrypt
+ * @dst      [out] Destination data encrypted/decrypted
+ * @last     Last update flag
+ */
+TEE_Result caam_ae_do_update(struct caam_ae_ctx *ctx, struct drvcrypt_buf *src,
+			     struct drvcrypt_buf *dst, bool last);
+
+/*
+ * Initialization of the AES GCM operation
+ *
+ * @dinit  Data initialization object
+ */
+TEE_Result caam_ae_initialize_gcm(struct drvcrypt_authenc_init *dinit);
+
+/*
+ * Finalize the AES GCM operation
+ *
+ * @dfinal  Last data object
+ */
+TEE_Result caam_ae_final_gcm(struct drvcrypt_authenc_final *dfinal);
+
+/*
+ * Initialization of the AES CCM operation
+ *
+ * @dinit  Data initialization object
+ */
+TEE_Result caam_ae_initialize_ccm(struct drvcrypt_authenc_init *dinit);
+
+/*
+ * Finalize the AES CCM operation
+ *
+ * @dfinal  Last data object
+ */
+TEE_Result caam_ae_final_ccm(struct drvcrypt_authenc_final *dfinal);
+
+#endif /* __LOCAL_H__ */
diff --git a/core/drivers/crypto/caam/ae/sub.mk b/core/drivers/crypto/caam/ae/sub.mk
new file mode 100644
index 000000000..68bf8cef7
--- /dev/null
+++ b/core/drivers/crypto/caam/ae/sub.mk
@@ -0,0 +1,5 @@
+incdirs-y += ../include
+
+srcs-y += caam_ae.c
+srcs-$(CFG_NXP_CAAM_AE_GCM_DRV) += caam_ae_gcm.c
+srcs-$(CFG_NXP_CAAM_AE_CCM_DRV) += caam_ae_ccm.c
\ No newline at end of file
diff --git a/core/drivers/crypto/caam/caam_ctrl.c b/core/drivers/crypto/caam/caam_ctrl.c
index f24d6fb36..51df78b24 100644
--- a/core/drivers/crypto/caam/caam_ctrl.c
+++ b/core/drivers/crypto/caam/caam_ctrl.c
@@ -7,6 +7,7 @@
 #include <assert.h>
 #include <caam_acipher.h>
+#include <caam_ae.h>
 #include <caam_cipher.h>
 #include <caam_common.h>
 #include <caam_hal_cfg.h>
 #include <caam_hal_clk.h>
@@ -100,6 +101,13 @@ static TEE_Result crypto_driver_init(void)
 		goto exit_init;
 	}
 
+	/* Initialize the Authenticated Encryption Module */
+	retstatus = caam_ae_init(jrcfg.base);
+	if (retstatus != CAAM_NO_ERROR) {
+		retresult = TEE_ERROR_GENERIC;
+		goto exit_init;
+	}
+
 	/* Initialize the HMAC Module */
 	retstatus = caam_hmac_init(&jrcfg);
 	if (retstatus != CAAM_NO_ERROR) {
diff --git a/core/drivers/crypto/caam/caam_desc.c b/core/drivers/crypto/caam/caam_desc.c
index 361e1aa10..43f070a17 100644
--- a/core/drivers/crypto/caam/caam_desc.c
+++ b/core/drivers/crypto/caam/caam_desc.c
@@ -142,6 +142,7 @@ void caam_desc_add_dmaobj(uint32_t *desc, struct caamdmaobj *data,
 		op_length = KEY_LENGTH(data->sgtbuf.length);
 		break;
 
+	case CMD_SEQ_IN_TYPE:
 	case CMD_SEQ_OUT_TYPE:
 		op_length = SEQ_LENGTH(data->sgtbuf.length);
 		op_ext_length = SEQ_EXT;
diff --git a/core/drivers/crypto/caam/caam_key.c b/core/drivers/crypto/caam/caam_key.c
index 5205b8b19..2d13e7b16 100644
--- a/core/drivers/crypto/caam/caam_key.c
+++ b/core/drivers/crypto/caam/caam_key.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
- * Copyright 2023 NXP
+ * Copyright 2023-2024 NXP
  */
 #include <assert.h>
 #include <caam_desc_helper.h>
@@ -57,8 +57,8 @@
  * makes it non-volatile and can be re-created when the chip powers up again.
  */
 #define KEY_BLOB_MODIFIER_SIZE 16
-static const uint8_t key_blob_modifier[KEY_BLOB_MODIFIER_SIZE] =
-	"NXP_OPTEE_BLOB";
+#define KEY_BLOB_MODIFIER "NXP_KEY_MODIFIER"
+static uint8_t *key_blob_modifier;
 
 /*
  * Serialized CAAM key structure format.
@@ -379,6 +379,8 @@ enum caam_status caam_key_operation_blob(const struct caamkey *in_key,
 
 	KEY_DUMPDESC(desc);
 
+	cache_operation(TEE_CACHECLEAN, key_blob_modifier,
+			KEY_BLOB_MODIFIER_SIZE);
 	caam_key_cache_op(TEE_CACHECLEAN, in_key);
 	caam_key_cache_op(TEE_CACHECLEAN, out_key);
 
@@ -743,6 +745,12 @@ enum caam_status caam_key_init(void)
 
 	assert(alloc_size <= CFG_CORE_BIGNUM_MAX_BITS);
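+
+	/*
+	 * Copy the blob key modifier into a DMA-able aligned buffer so it
+	 * can be cleaned from the cache before the CAAM reads it during
+	 * blob encapsulation/decapsulation.
+	 */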
 
+	key_blob_modifier = caam_calloc_align(KEY_BLOB_MODIFIER_SIZE);
+	if (!key_blob_modifier)
+		return CAAM_FAILURE;
+
+	memcpy(key_blob_modifier, KEY_BLOB_MODIFIER, KEY_BLOB_MODIFIER_SIZE);
+
 	KEY_TRACE("Max serialized key size %zu", alloc_size);
 
 	KEY_TRACE("Default CAAM key generation type %s",
diff --git a/core/drivers/crypto/caam/caam_rng.c b/core/drivers/crypto/caam/caam_rng.c
index b4f8b9575..38386926d 100644
--- a/core/drivers/crypto/caam/caam_rng.c
+++ b/core/drivers/crypto/caam/caam_rng.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /**
- * Copyright 2017-2021 NXP
+ * Copyright 2017-2021, 2024 NXP
  *
  * Brief   CAAM Random Number Generator manager.
  *         Implementation of RNG functions.
@@ -11,6 +11,7 @@
 #include <caam_jr.h>
 #include <caam_rng.h>
 #include <caam_utils_mem.h>
+#include <caam_utils_status.h>
 #include <crypto/crypto.h>
 #include <kernel/panic.h>
 #include <mm/core_memprot.h>
@@ -19,41 +20,11 @@
 #include <tee/tee_cryp_utl.h>
 #include <string.h>
 
-/*
- * Define the RNG Data buffer size and number
- */
-#define RNG_DATABUF_SIZE	1024
-#define RNG_DATABUF_NB		2
-
 /*
  * Define the number of descriptor entry to generate random data
  */
 #define RNG_GEN_DESC_ENTRIES	5
 
-/*
- * Status of the data generation
- */
-enum rngsta {
-	DATA_EMPTY = 0, /* Data bufer empty */
-	DATA_ONGOING,   /* Data generation on going */
-	DATA_FAILURE,   /* Error during data generation */
-	DATA_OK,        /* Data generation complete with success */
-};
-
-/*
- * RNG Data generation
- */
-struct rngdata {
-	struct caam_jobctx jobctx; /* Job Ring Context */
-	uint32_t job_id;           /* Job Id enqueued */
-
-	uint8_t *data;           /* Random Data buffer */
-	size_t size;             /* Size in bytes of the Random data buffer */
-	size_t rdindex;          /* Current data index in the buffer */
-
-	enum rngsta status;      /* Status of the data generation */
-};
-
 /*
  * RNG module private data
  */
@@ -61,8 +32,6 @@ struct rng_privdata {
 	vaddr_t baseaddr;                       /* RNG base address */
 	bool instantiated;                      /* RNG instantiated */
 	bool pr_enabled;			/* RNG prediction resistance */
-	struct rngdata databuf[RNG_DATABUF_NB]; /* RNG Data generation */
-	uint8_t dataidx;                        /* Current RNG Data buffer */
 };
 
 static struct rng_privdata *rng_privdata;
@@ -70,9 +39,6 @@ static struct rng_privdata *rng_privdata;
 /* Allocate and initialize module private data */
 static enum caam_status do_allocate(void)
 {
-	struct rngdata *rngdata = NULL;
-	unsigned int idx = 0;
-
 	/* Allocate the Module resources */
 	rng_privdata = caam_calloc(sizeof(*rng_privdata));
 	if (!rng_privdata) {
@@ -82,201 +48,17 @@ static enum caam_status do_allocate(void)
 
 	rng_privdata->instantiated = false;
 
-	/* Allocates the RNG Data Buffers */
-	for (idx = 0; idx < RNG_DATABUF_NB; idx++) {
-		rngdata = &rng_privdata->databuf[idx];
-		rngdata->data = caam_calloc_align(RNG_DATABUF_SIZE);
-		if (!rngdata->data)
-			return CAAM_OUT_MEMORY;
-
-		rngdata->size = RNG_DATABUF_SIZE;
-		rngdata->jobctx.desc = caam_calloc_desc(RNG_GEN_DESC_ENTRIES);
-		if (!rngdata->jobctx.desc)
-			return CAAM_OUT_MEMORY;
-	}
-
 	return CAAM_NO_ERROR;
 }
 
 /* Free module private data */
 static void do_free(void)
 {
-	struct rngdata *rng = NULL;
-	unsigned int idx = 0;
-
-	if (rng_privdata) {
-		for (idx = 0; idx < RNG_DATABUF_NB; idx++) {
-			rng = &rng_privdata->databuf[idx];
-
-			/* Check if there is a Job ongoing to cancel it */
-			if (atomic_load_u32(&rng->status) == DATA_ONGOING)
-				caam_jr_cancel(rng->job_id);
-
-			caam_free_desc(&rng->jobctx.desc);
-			caam_free(rng->data);
-			rng->data = NULL;
-		}
-
-		caam_free(rng_privdata);
-		rng_privdata = NULL;
-	}
+	caam_free(rng_privdata);
+	rng_privdata = NULL;
 }
 
 #ifdef CFG_NXP_CAAM_RNG_DRV
-/*
- * RNG data generation job ring callback completion
- *
- * @jobctx      RNG data JR Job Context
- */
-static void rng_data_done(struct caam_jobctx *jobctx)
-{
-	struct rngdata *rng = jobctx->context;
-
-	RNG_TRACE("RNG Data id 0x%08" PRIx32 " done with status 0x%" PRIx32,
-		  rng->job_id, jobctx->status);
-
-	if (JRSTA_SRC_GET(jobctx->status) == JRSTA_SRC(NONE)) {
-		atomic_store_u32(&rng->status, DATA_OK);
-
-		/* Invalidate the data buffer to ensure software gets it */
-		cache_operation(TEE_CACHEINVALIDATE, rng->data, rng->size);
-	} else {
-		RNG_TRACE("RNG Data completion in error 0x%" PRIx32,
-			  jobctx->status);
-		atomic_store_u32(&rng->status, DATA_FAILURE);
-	}
-
-	rng->job_id = 0;
-	rng->rdindex = 0;
-}
-
-/*
- * Prepares the data generation descriptors
- *
- * @rng       Reference to the RNG Data object
- */
-static enum caam_status prepare_gen_desc(struct rngdata *rng)
-{
-	paddr_t paddr = 0;
-	uint32_t *desc = NULL;
-	uint32_t op = RNG_GEN_DATA;
-
-	if (rng_privdata->pr_enabled)
-		op |= ALGO_RNG_PR;
-
-	/* Convert the buffer virtual address to physical address */
-	paddr = virt_to_phys(rng->data);
-	if (!paddr)
-		return CAAM_FAILURE;
-
-	desc = rng->jobctx.desc;
-
-	caam_desc_init(desc);
-	caam_desc_add_word(desc, DESC_HEADER(0));
-	caam_desc_add_word(desc, op);
-	caam_desc_add_word(desc, FIFO_ST(CLASS_NO, RNG_TO_MEM, rng->size));
-	caam_desc_add_ptr(desc, paddr);
-
-	RNG_DUMPDESC(desc);
-
-	/* Prepare the job context */
-	rng->jobctx.context = rng;
-	rng->jobctx.callback = rng_data_done;
-	return CAAM_NO_ERROR;
-}
-
-/*
- * Launches a RNG Data generation
- *
- * @rng      RNG Data context
- */
-static enum caam_status do_rng_start(struct rngdata *rng)
-{
-	enum caam_status ret = CAAM_FAILURE;
-
-	/* Ensure that data buffer is visible from the HW */
-	cache_operation(TEE_CACHEFLUSH, rng->data, rng->size);
-
-	rng->job_id = 0;
-	atomic_store_u32(&rng->status, DATA_EMPTY);
-
-	ret = caam_jr_enqueue(&rng->jobctx, &rng->job_id);
-
-	if (ret == CAAM_PENDING) {
-		atomic_store_u32(&rng->status, DATA_ONGOING);
-		ret = CAAM_NO_ERROR;
-	} else {
-		RNG_TRACE("RNG Job Ring Error 0x%08x", ret);
-		atomic_store_u32(&rng->status, DATA_FAILURE);
-		ret = CAAM_FAILURE;
-	}
-
-	return ret;
-}
-
-/* Checks if there are random data available */
-static enum caam_status do_check_data(void)
-{
-	enum caam_status ret = CAAM_FAILURE;
-	struct rngdata *rng = NULL;
-	uint32_t wait_jobs = 0;
-	unsigned int idx = 0;
-	unsigned int loop = 4;
-
-	/* Check if there is a RNG Job to be run */
-	for (idx = 0; idx < RNG_DATABUF_NB; idx++) {
-		rng = &rng_privdata->databuf[idx];
-		if (atomic_load_u32(&rng->status) == DATA_EMPTY) {
-			RNG_TRACE("Start RNG #%" PRIu32 " data generation",
-				  idx);
-			ret = do_rng_start(rng);
-			if (ret != CAAM_NO_ERROR)
-				return CAAM_FAILURE;
-		}
-	}
-
-	/* Check if the current data buffer contains data */
-	rng = &rng_privdata->databuf[rng_privdata->dataidx];
-
-	switch (atomic_load_u32(&rng->status)) {
-	case DATA_OK:
-		return CAAM_NO_ERROR;
-
-	case DATA_FAILURE:
-		return CAAM_FAILURE;
-
-	default:
-		/* Wait until one of the data buffer completes */
-		do {
-			wait_jobs = 0;
-			for (idx = 0; idx < RNG_DATABUF_NB; idx++) {
-				rng = &rng_privdata->databuf[idx];
-				wait_jobs |= rng->job_id;
-
-				if (atomic_load_u32(&rng->status) == DATA_OK) {
-					RNG_TRACE("RNG Data buffer #%" PRIu32
-						  " ready",
-						  idx);
-					rng_privdata->dataidx = idx;
-					return CAAM_NO_ERROR;
-				}
-			}
-
-			if (!wait_jobs) {
-				RNG_TRACE("There are no Data Buffers ongoing");
-				return CAAM_FAILURE;
-			}
-
-			/* Need to wait until one of the jobs completes */
-			(void)caam_jr_dequeue(wait_jobs, 100);
-		} while (loop--);
-
-		break;
-	}
-
-	return CAAM_FAILURE;
-}
-
 /*
  * Return the requested random data
  *
@@ -285,9 +67,12 @@ static enum caam_status do_check_data(void)
  */
 static TEE_Result do_rng_read(uint8_t *buf, size_t len)
 {
-	struct rngdata *rng = NULL;
-	size_t remlen = len;
-	uint8_t *rngbuf = buf;
+	TEE_Result ret = TEE_ERROR_GENERIC;
+	uint32_t *desc = NULL;
+	uint32_t op = RNG_GEN_DATA;
+	void *rng_data = NULL;
+	paddr_t paddr = 0;
+	struct caam_jobctx jobctx = { };
 
 	if (!rng_privdata) {
 		RNG_TRACE("RNG Driver not initialized");
@@ -299,60 +84,56 @@ static TEE_Result do_rng_read(uint8_t *buf, size_t len)
 		return TEE_ERROR_BAD_STATE;
 	}
 
-	do {
-		if (do_check_data() != CAAM_NO_ERROR) {
-			RNG_TRACE("No Data available or Error");
-			return TEE_ERROR_BAD_STATE;
-		}
+	rng_data = caam_calloc_align(len);
+	if (!rng_data) {
+		RNG_TRACE("RNG buffer allocation failed");
+		return TEE_ERROR_OUT_OF_MEMORY;
+	}
 
-		rng = &rng_privdata->databuf[rng_privdata->dataidx];
-		RNG_TRACE("Context #%" PRIu8
-			  " contains %zu data asked %zu (%zu)",
-			  rng_privdata->dataidx, rng->size - rng->rdindex,
-			  remlen, len);
+	/* Ensure that data buffer is visible from the HW */
+	cache_operation(TEE_CACHEFLUSH, rng_data, len);
 
-		/* Check that current context data are available */
-		if ((rng->size - rng->rdindex) <= remlen) {
-			/*
-			 * There is no or just enough data available,
-			 * copy all data
-			 */
-			RNG_TRACE("Copy all available data");
-			memcpy(rngbuf, &rng->data[rng->rdindex],
-			       rng->size - rng->rdindex);
-
-			remlen -= rng->size - rng->rdindex;
-			rngbuf += rng->size - rng->rdindex;
-			/* Set the RNG data status as empty */
-			atomic_store_u32(&rng->status, DATA_EMPTY);
-		} else {
-			/* There is enough data in the current context */
-			RNG_TRACE("Copy %zu data", remlen);
-			memcpy(rngbuf, &rng->data[rng->rdindex], remlen);
-			rng->rdindex += remlen;
-			remlen = 0;
-		}
-	} while (remlen);
+	/* Convert the buffer virtual address to physical address */
+	paddr = virt_to_phys(rng_data);
+	if (!paddr) {
+		RNG_TRACE("Virtual/Physical conversion failed");
+		goto exit;
+	}
 
-	return TEE_SUCCESS;
-}
+	desc = caam_calloc_desc(RNG_GEN_DESC_ENTRIES);
+	if (!desc) {
+		RNG_TRACE("Descriptor allocation failed");
+		ret = TEE_ERROR_OUT_OF_MEMORY;
+		goto exit;
+	}
 
-/* Initialize the RNG module to generate data */
-static enum caam_status caam_rng_init_data(void)
-{
-	enum caam_status retstatus = CAAM_FAILURE;
-	struct rngdata *rng = NULL;
-	unsigned int idx = 0;
+	if (IS_ENABLED(CFG_CAAM_RNG_RUNTIME_PR)) {
+		if (rng_privdata->pr_enabled)
+			op |= ALGO_RNG_PR;
+	}
 
-	for (idx = 0; idx < RNG_DATABUF_NB; idx++) {
-		rng = &rng_privdata->databuf[idx];
-		retstatus = prepare_gen_desc(rng);
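+	/*
+	 * Build a minimal one-shot descriptor: header, RNG data generation
+	 * operation, then a FIFO STORE of 'len' bytes from the RNG to the
+	 * destination physical address.
+	 */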
+	caam_desc_init(desc);
+	caam_desc_add_word(desc, DESC_HEADER(0));
+	caam_desc_add_word(desc, op);
+	caam_desc_add_word(desc, FIFO_ST(CLASS_NO, RNG_TO_MEM, len));
+	caam_desc_add_ptr(desc, paddr);
+
+	jobctx.desc = desc;
+	RNG_DUMPDESC(desc);
 
-		if (retstatus != CAAM_NO_ERROR)
-			break;
+	if (!caam_jr_enqueue(&jobctx, NULL)) {
+		cache_operation(TEE_CACHEINVALIDATE, rng_data, len);
+		memcpy(buf, rng_data, len);
+		ret = TEE_SUCCESS;
+	} else {
+		RNG_TRACE("CAAM Status 0x%08" PRIx32, jobctx.status);
+		ret = job_status_to_tee_result(jobctx.status);
 	}
 
-	return retstatus;
+exit:
+	caam_free(rng_data);
+	caam_free_desc(&desc);
+	return ret;
 }
 #endif /* CFG_NXP_CAAM_RNG_DRV */
 
@@ -555,11 +336,6 @@ enum caam_status caam_rng_init(vaddr_t ctrl_addr)
 		retstatus = caam_rng_instantiation();
 	}
 
-#ifdef CFG_NXP_CAAM_RNG_DRV
-	if (retstatus == CAAM_NO_ERROR)
-		retstatus = caam_rng_init_data();
-#endif
-
 	if (retstatus != CAAM_NO_ERROR)
 		do_free();
 
diff --git a/core/drivers/crypto/caam/caam_sm.c b/core/drivers/crypto/caam/caam_sm.c
index 231df870e..0fe8fc279 100644
--- a/core/drivers/crypto/caam/caam_sm.c
+++ b/core/drivers/crypto/caam/caam_sm.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
- * Copyright 2019, 2023 NXP
+ * Copyright 2019, 2023-2024 NXP
  */
 #include <caam_common.h>
 #include <caam_hal_ctrl.h>
@@ -124,8 +124,8 @@ enum caam_status
 caam_sm_set_access_perm(const struct caam_sm_page_desc *page_desc,
 			unsigned int grp1_perm, unsigned int grp2_perm)
 {
-	uint32_t grp1 = UINT32_MAX;
-	uint32_t grp2 = UINT32_MAX;
+	uint32_t grp1 = 0;
+	uint32_t grp2 = 0;
 
 	if (!page_desc)
 		return CAAM_BAD_PARAM;
@@ -144,12 +144,18 @@ caam_sm_set_access_perm(const struct caam_sm_page_desc *page_desc,
 	 *
 	 * The Access Group is related to the Job Ring owner setting without
 	 * the Secure Bit setting already managed by the Job Ring.
+	 *
+	 * If any group permission is requested, Secure World MID access
+	 * must be enabled for the partition in the SMAG1/2 registers.
+	 * Since the Non-Secure and Secure worlds share the same MID, the
+	 * JROWN_ARM_NS bit position is used for both.
 	 */
 	if (grp1_perm)
-		grp1 = JROWN_ARM_NS;
+		grp1 = SHIFT_U32(1, JROWN_ARM_NS);
 
 	if (grp2_perm)
-		grp2 = JROWN_ARM_NS;
+		grp2 = SHIFT_U32(1, JROWN_ARM_NS);
 
 	caam_hal_sm_set_access_group(sm_privdata.jr_addr, page_desc->partition,
 				     grp1, grp2);
diff --git a/core/drivers/crypto/caam/cipher/caam_cipher.c b/core/drivers/crypto/caam/cipher/caam_cipher.c
index 29cb8f419..b5c737e9b 100644
--- a/core/drivers/crypto/caam/cipher/caam_cipher.c
+++ b/core/drivers/crypto/caam/cipher/caam_cipher.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
- * Copyright 2018-2021 NXP
+ * Copyright 2018-2021, 2024 NXP
  *
  * Implementation of Cipher functions
  */
@@ -116,33 +116,6 @@ static const struct cipheralg des3_alg[] = {
 	},
 };
 
-/*
- * Allocate context data and copy input data into
- *
- * @dst  [out] Destination data to allocate and fill
- * @src  Source of data to copy
- */
-static enum caam_status copy_ctx_data(struct caambuf *dst,
-				      struct drvcrypt_buf *src)
-{
-	enum caam_status ret = CAAM_OUT_MEMORY;
-
-	if (!dst->data) {
-		/* Allocate the destination buffer */
-		ret = caam_alloc_align_buf(dst, src->length);
-		if (ret != CAAM_NO_ERROR)
-			return CAAM_OUT_MEMORY;
-	}
-
-	/* Do the copy */
-	memcpy(dst->data, src->data, dst->length);
-
-	/* Push data to physical memory */
-	cache_operation(TEE_CACHEFLUSH, dst->data, dst->length);
-
-	return CAAM_NO_ERROR;
-}
-
 /*
  * Verify the input key size with the requirements
  *
@@ -384,38 +357,21 @@ void caam_cipher_copy_state(void *dst_ctx, void *src_ctx)
 		caam_cpy_block_src(&dst->blockbuf, &srcdata, 0);
 	}
 
-	if (src->key1.length) {
-		struct drvcrypt_buf key1 = {
-			.data = src->key1.data,
-			.length = src->key1.length
-		};
-		copy_ctx_data(&dst->key1, &key1);
-	}
+	if (src->key1.length)
+		caam_cpy_buf_src(&dst->key1, src->key1.data, src->key1.length);
 
-	if (src->key2.length) {
-		struct drvcrypt_buf key2 = {
-			.data = src->key2.data,
-			.length = src->key2.length
-		};
-		copy_ctx_data(&dst->key2, &key2);
-	}
+	if (src->key2.length)
+		caam_cpy_buf_src(&dst->key2, src->key2.data, src->key2.length);
 
 	if (src->ctx.length) {
-		struct drvcrypt_buf ctx = {
-			.data = src->ctx.data,
-			.length = src->ctx.length
-		};
-		cache_operation(TEE_CACHEINVALIDATE, ctx.data, ctx.length);
-		copy_ctx_data(&dst->ctx, &ctx);
+		cache_operation(TEE_CACHEINVALIDATE, src->ctx.data,
+				src->ctx.length);
+		caam_cpy_buf_src(&dst->ctx, src->ctx.data, src->ctx.length);
 	}
 
-	if (src->tweak.length) {
-		struct drvcrypt_buf tweak = {
-			.data = src->tweak.data,
-			.length = src->tweak.length
-		};
-		copy_ctx_data(&dst->tweak, &tweak);
-	}
+	if (src->tweak.length)
+		caam_cpy_buf_src(&dst->tweak, src->tweak.data,
+				 src->tweak.length);
 }
 
 TEE_Result caam_cipher_initialize(struct drvcrypt_cipher_init *dinit)
@@ -444,7 +400,9 @@ TEE_Result caam_cipher_initialize(struct drvcrypt_cipher_init *dinit)
 		}
 
 		/* Copy the key 1 */
-		retstatus = copy_ctx_data(&cipherdata->key1, &dinit->key1);
+		retstatus = caam_cpy_buf_src(&cipherdata->key1,
+					     dinit->key1.data,
+					     dinit->key1.length);
 		CIPHER_TRACE("Copy Key 1 returned 0x%" PRIx32, retstatus);
 
 		if (retstatus != CAAM_NO_ERROR) {
@@ -464,7 +422,9 @@ TEE_Result caam_cipher_initialize(struct drvcrypt_cipher_init *dinit)
 		}
 
 		/* Copy the key 2 */
-		retstatus = copy_ctx_data(&cipherdata->key2, &dinit->key2);
+		retstatus = caam_cpy_buf_src(&cipherdata->key2,
+					     dinit->key2.data,
+					     dinit->key2.length);
 		CIPHER_TRACE("Copy Key 2 returned 0x%" PRIx32, retstatus);
 
 		if (retstatus != CAAM_NO_ERROR) {
@@ -488,7 +448,8 @@ TEE_Result caam_cipher_initialize(struct drvcrypt_cipher_init *dinit)
 			     alg->size_ctx);
 
 		/* Copy the IV into the context register */
-		retstatus = copy_ctx_data(&cipherdata->ctx, &dinit->iv);
+		retstatus = caam_cpy_buf_src(&cipherdata->ctx, dinit->iv.data,
+					     dinit->iv.length);
 		CIPHER_TRACE("Copy IV returned 0x%" PRIx32, retstatus);
 
 		if (retstatus != CAAM_NO_ERROR) {
@@ -506,8 +467,9 @@ TEE_Result caam_cipher_initialize(struct drvcrypt_cipher_init *dinit)
 			}
 
 			/* Copy the tweak */
-			retstatus = copy_ctx_data(&cipherdata->tweak,
-						  &dinit->iv);
+			retstatus = caam_cpy_buf_src(&cipherdata->tweak,
+						     dinit->iv.data,
+						     dinit->iv.length);
 			CIPHER_TRACE("Copy Tweak returned 0x%" PRIx32,
 				     retstatus);
 
diff --git a/core/drivers/crypto/caam/crypto.mk b/core/drivers/crypto/caam/crypto.mk
index 0b54b2be2..191f4d3d8 100644
--- a/core/drivers/crypto/caam/crypto.mk
+++ b/core/drivers/crypto/caam/crypto.mk
@@ -20,6 +20,7 @@ ifeq ($(CFG_NXP_CAAM),y)
 # DBG_DH     BIT32(13) // DH Trace
 # DBG_DSA    BIT32(14) // DSA trace
 # DBG_MP     BIT32(15) // MP trace
+# DBG_AE     BIT32(17) // AE trace
 CFG_DBG_CAAM_TRACE ?= 0x2
 CFG_DBG_CAAM_DESC ?= 0x0
 CFG_DBG_CAAM_BUF ?= 0x0
@@ -28,7 +29,7 @@ CFG_DBG_CAAM_BUF ?= 0x0
 caam-drivers = RNG BLOB
 
 # CAAM default drivers connected to the HW crypto API
-caam-crypto-drivers = CIPHER HASH HMAC CMAC
+caam-crypto-drivers = CIPHER HASH HMAC CMAC AE_CCM
 
 ifneq (,$(filter $(PLATFORM_FLAVOR),ls1012ardb ls1043ardb ls1046ardb))
 $(call force, CFG_CAAM_BIG_ENDIAN,y)
@@ -39,7 +40,7 @@ $(call force, CFG_CAAM_SGT_ALIGN,4)
 $(call force, CFG_CAAM_64BIT,y)
 $(call force, CFG_NXP_CAAM_SGT_V1,y)
 $(call force, CFG_CAAM_ITR,n)
-caam-crypto-drivers += RSA DSA ECC DH MATH
+caam-crypto-drivers += RSA DSA ECC DH MATH AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),ls1088ardb ls2088ardb ls1028ardb))
 $(call force, CFG_CAAM_LITTLE_ENDIAN,y)
 $(call force, CFG_JR_BLOCK_SIZE,0x10000)
@@ -49,7 +50,7 @@ $(call force, CFG_NXP_CAAM_SGT_V2,y)
 $(call force, CFG_CAAM_SGT_ALIGN,4)
 $(call force, CFG_CAAM_64BIT,y)
 $(call force, CFG_CAAM_ITR,n)
-caam-crypto-drivers += RSA DSA ECC DH MATH
+caam-crypto-drivers += RSA DSA ECC DH MATH AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),lx2160aqds lx2160ardb))
 $(call force, CFG_CAAM_LITTLE_ENDIAN,y)
 $(call force, CFG_JR_BLOCK_SIZE,0x10000)
@@ -60,14 +61,15 @@ $(call force, CFG_NXP_CAAM_SGT_V2,y)
 $(call force, CFG_CAAM_SGT_ALIGN,4)
 $(call force, CFG_CAAM_64BIT,y)
 $(call force, CFG_CAAM_ITR,n)
-caam-crypto-drivers += RSA DSA ECC DH MATH
+caam-crypto-drivers += RSA DSA ECC DH MATH AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx8qm-flavorlist) $(mx8qx-flavorlist)))
 $(call force, CFG_CAAM_SIZE_ALIGN,4)
 $(call force, CFG_JR_BLOCK_SIZE,0x10000)
 $(call force, CFG_JR_INDEX,3)
 $(call force, CFG_JR_INT,486)
 $(call force, CFG_NXP_CAAM_SGT_V1,y)
-caam-crypto-drivers += RSA DSA ECC DH MATH
+$(call force, CFG_CAAM_JR_DISABLE_NODE,n)
+caam-crypto-drivers += RSA DSA ECC DH MATH AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx8dxl-flavorlist)))
 $(call force, CFG_CAAM_SIZE_ALIGN,4)
 $(call force, CFG_JR_BLOCK_SIZE,0x10000)
@@ -75,7 +77,7 @@ $(call force, CFG_JR_INDEX,3)
 $(call force, CFG_JR_INT,356)
 $(call force, CFG_NXP_CAAM_SGT_V1,y)
 $(call force, CFG_CAAM_JR_DISABLE_NODE,n)
-caam-crypto-drivers += RSA DSA ECC DH MATH
+caam-crypto-drivers += RSA DSA ECC DH MATH AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx8mm-flavorlist) $(mx8mn-flavorlist) \
 	$(mx8mp-flavorlist) $(mx8mq-flavorlist)))
 $(call force, CFG_JR_BLOCK_SIZE,0x1000)
@@ -89,19 +91,21 @@ $(call force, CFG_JR_HAB_INDEX,0)
 # this issue, controlled by CFG_NXP_CAAM_C2_CTX_REG_WA flag.
 $(call force, CFG_NXP_CAAM_C2_CTX_REG_WA,y)
 caam-drivers += MP DEK
-caam-crypto-drivers += RSA DSA ECC DH MATH
+caam-crypto-drivers += RSA DSA ECC DH MATH AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx8ulp-flavorlist)))
 $(call force, CFG_JR_BLOCK_SIZE,0x1000)
 $(call force, CFG_JR_INDEX,2)
 $(call force, CFG_JR_INT,114)
 $(call force, CFG_NXP_CAAM_SGT_V1,y)
 $(call force, CFG_CAAM_ITR,n)
+caam-crypto-drivers += AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx7ulp-flavorlist)))
 $(call force, CFG_JR_BLOCK_SIZE,0x1000)
 $(call force, CFG_JR_INDEX,0)
 $(call force, CFG_JR_INT,137)
 $(call force, CFG_NXP_CAAM_SGT_V1,y)
 $(call force, CFG_CAAM_ITR,n)
+caam-crypto-drivers += AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx6ul-flavorlist) $(mx7d-flavorlist) \
 	$(mx7s-flavorlist)))
 $(call force, CFG_JR_BLOCK_SIZE,0x1000)
@@ -109,10 +113,10 @@ $(call force, CFG_JR_INDEX,0)
 $(call force, CFG_JR_INT,137)
 $(call force, CFG_NXP_CAAM_SGT_V1,y)
 caam-drivers += MP
-caam-crypto-drivers += RSA DSA ECC DH MATH
+caam-crypto-drivers += RSA DSA ECC DH MATH AE_GCM
 else ifneq (,$(filter $(PLATFORM_FLAVOR),$(mx6q-flavorlist) $(mx6qp-flavorlist) \
 	$(mx6sx-flavorlist) $(mx6d-flavorlist) $(mx6dl-flavorlist) \
-        $(mx6s-flavorlist) $(mx8ulp-flavorlist)))
+	$(mx6s-flavorlist) $(mx8ulp-flavorlist)))
 $(call force, CFG_JR_BLOCK_SIZE,0x1000)
 $(call force, CFG_JR_INDEX,0)
 $(call force, CFG_JR_INT,137)
@@ -148,8 +152,18 @@ CFG_CAAM_JR_DISABLE_NODE ?= y
 # Define the default CAAM private key encryption generation and the bignum
 # maximum size needed.
 # CAAM_KEY_PLAIN_TEXT    -> 4096 bits
-# CAAM_KEY_BLACK_ECB|CCM -> 4156 bits
-CFG_CORE_BIGNUM_MAX_BITS ?= 4156
+# CAAM_KEY_BLACK_ECB|CCM -> 4576 bits
+# 4096 (RSA max key size) + 12 * 8 (serialization header, 12 bytes) +
+# 48 * 8 (black blob overhead, 48 bytes) = 4576 bits
+CFG_CORE_BIGNUM_MAX_BITS ?= 4576
+
+# Define whether Prediction Resistance (PR) is enabled for every random
+# number request to the CAAM.
+# Random number generation is drastically slower when PR is enabled.
+# Users who want the CAAM RNG to be reseeded on every random number
+# request can set this flag to y.
+CFG_CAAM_RNG_RUNTIME_PR ?= n
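+# For example, a user can reseed the TRNG on every request by building with
+# CFG_CAAM_RNG_RUNTIME_PR=y on the make command line.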
 
 # Enable CAAM non-crypto drivers
 $(foreach drv, $(caam-drivers), $(eval CFG_NXP_CAAM_$(drv)_DRV ?= y))
@@ -180,6 +194,11 @@ ifeq ($(CFG_NXP_CAAM_CIPHER_DRV), y)
 $(call force, CFG_CRYPTO_DRV_CIPHER,y,Mandated by CFG_NXP_CAAM_CIPHER_DRV)
 endif
 
+# Enable AE crypto driver
+ifeq ($(call cfg-one-enabled,CFG_NXP_CAAM_AE_CCM_DRV CFG_NXP_CAAM_AE_GCM_DRV),y)
+$(call force, CFG_CRYPTO_DRV_AUTHENC,y,Mandated by CFG_NXP_CAAM_AE_CCM/GCM_DRV)
+endif
+
 # Enable HASH crypto driver
 ifeq ($(CFG_NXP_CAAM_HASH_DRV), y)
 $(call force, CFG_CRYPTO_DRV_HASH,y,Mandated by CFG_NXP_CAAM_HASH_DRV)
diff --git a/core/drivers/crypto/caam/hal/common/hal_cfg.c b/core/drivers/crypto/caam/hal/common/hal_cfg.c
index 2bdf9e655..06dc723eb 100644
--- a/core/drivers/crypto/caam/hal/common/hal_cfg.c
+++ b/core/drivers/crypto/caam/hal/common/hal_cfg.c
@@ -54,6 +54,7 @@ enum caam_status caam_hal_cfg_get_conf(struct caam_jrcfg *jrcfg)
 		jrcfg->it_num = CFG_JR_INT;
 
 		if (IS_ENABLED(CFG_NXP_CAAM_RUNTIME_JR) &&
+		    IS_ENABLED(CFG_CAAM_JR_DISABLE_NODE) &&
 		    !is_embedded_dt(fdt)) {
 			if (fdt) {
 				/* Ensure Secure Job Ring is secure in DTB */
@@ -66,6 +67,12 @@ enum caam_status caam_hal_cfg_get_conf(struct caam_jrcfg *jrcfg)
 
 	retstatus = CAAM_NO_ERROR;
 
+#ifdef CFG_NXP_CAAM_RUNTIME_JR
+	caam_hal_jr_prepare_backup(jrcfg->base, jrcfg->offset);
+#endif
+
+	caam_hal_cfg_hab_jr_mgmt(jrcfg);
+
 exit_get_conf:
 	HAL_TRACE("HAL CFG Get CAAM config ret (0x%x)\n", retstatus);
 	return retstatus;
@@ -80,6 +87,12 @@ void __weak caam_hal_cfg_setup_nsjobring(struct caam_jrcfg *jrcfg)
 	for (jrnum = caam_hal_ctrl_jrnum(jrcfg->base); jrnum; jrnum--) {
 		jr_offset = jrnum * JRX_BLOCK_SIZE;
 
+		/*
+		 * Skip configuration for the JR used by the HAB
+		 */
+		if (caam_hal_cfg_is_hab_jr(jr_offset))
+			continue;
+
 #ifdef CFG_NXP_CAAM_RUNTIME_JR
 		/*
 		 * When the Cryptographic driver is enabled, keep the
@@ -98,3 +111,12 @@ void __weak caam_hal_cfg_setup_nsjobring(struct caam_jrcfg *jrcfg)
 			caam_hal_jr_prepare_backup(jrcfg->base, jr_offset);
 	}
 }
+
+__weak void caam_hal_cfg_hab_jr_mgmt(struct caam_jrcfg *jrcfg __unused)
+{
+}
+
+__weak bool caam_hal_cfg_is_hab_jr(paddr_t jr_offset __unused)
+{
+	return false;
+}
diff --git a/core/drivers/crypto/caam/hal/common/hal_cfg_dt.c b/core/drivers/crypto/caam/hal/common/hal_cfg_dt.c
index 9a84b2c08..abe772386 100644
--- a/core/drivers/crypto/caam/hal/common/hal_cfg_dt.c
+++ b/core/drivers/crypto/caam/hal/common/hal_cfg_dt.c
@@ -8,12 +8,32 @@
 #include <caam_hal_cfg.h>
 #include <caam_hal_jr.h>
 #include <caam_jr.h>
+#include <config.h>
 #include <kernel/boot.h>
 #include <kernel/dt.h>
 #include <kernel/interrupt.h>
 #include <libfdt.h>
 #include <mm/core_memprot.h>
 #include <mm/core_mmu.h>
+#include <stdio.h>
+
+#if defined(CFG_MX8M)
+#define DTB_JR_PATH "/soc@0/bus@30800000/crypto@30900000/jr"
+#elif defined(CFG_MX8QM) || defined(CFG_MX8QXP) || defined(CFG_MX8DXL)
+#define DTB_JR_PATH "/bus@31400000/crypto@31400000/jr"
+#elif defined(CFG_MX8ULP)
+#define DTB_JR_PATH "/soc@0/bus@29000000/crypto@292e0000/jr"
+#elif defined(PLATFORM_FLAVOR_ls1046ardb) || \
+	defined(PLATFORM_FLAVOR_ls1043ardb) || \
+	defined(PLATFORM_FLAVOR_ls1012ardb)
+#define DTB_JR_PATH "/soc/crypto@1700000/jr"
+#elif defined(PLATFORM_FLAVOR_ls1028ardb) || \
+	defined(PLATFORM_FLAVOR_ls1088ardb) || \
+	defined(PLATFORM_FLAVOR_ls2088ardb)
+#define DTB_JR_PATH "/soc/crypto@8000000/jr"
+#else
+#define DTB_JR_PATH ""
+#endif
 
 static const char *dt_caam_match_table = {
 	"fsl,sec-v4.0",
@@ -134,4 +154,21 @@ void caam_hal_cfg_disable_jobring_dt(void *fdt, struct caam_jrcfg *jrcfg)
 			break;
 		}
 	}
+
+	if (node == -FDT_ERR_NOTFOUND && IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
+		char target[64] = { };
+		int ret = 0;
+
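+		/*
+		 * Build the full DT path of the job ring node, e.g.
+		 * DTB_JR_PATH "@1000" for a (hypothetical) 0x1000 offset.
+		 */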
+		ret = snprintf(target, sizeof(target), DTB_JR_PATH "@%lx",
+			       jrcfg->offset);
+		if (ret < 0 || (size_t)ret >= sizeof(target)) {
+			EMSG("Cannot build JR DT overlay path");
+			return;
+		}
+
+		if (dt_overlay_disable_node(target)) {
+			EMSG("Cannot apply JR DT overlay");
+			return;
+		}
+	}
 }
diff --git a/core/drivers/crypto/caam/hal/common/hal_rng.c b/core/drivers/crypto/caam/hal/common/hal_rng.c
index aba8cbcd4..359952369 100644
--- a/core/drivers/crypto/caam/hal/common/hal_rng.c
+++ b/core/drivers/crypto/caam/hal/common/hal_rng.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
- * Copyright 2018-2021 NXP
+ * Copyright 2018-2021, 2024 NXP
  *
  * Brief   CAAM Random Number Generator Hardware Abstration Layer.
  *         Implementation of primitives to access HW.
@@ -91,6 +91,38 @@ enum caam_status caam_hal_rng_kick(vaddr_t baseaddr, uint32_t inc_delay)
 	 */
 	io_setbits32(baseaddr + TRNG_MCTL, TRNG_MCTL_PRGM | TRNG_MCTL_ACC);
 
+#if defined(CFG_MX8ULP)
+	/*
+	 * For i.MX8ULP.
+	 * TRNG silicon characterization is done to determine the correct
+	 * RNG configuration under different temperature and voltage
+	 * settings.
+	 * Configure dual oscillator mode as it is more robust and faster.
+	 * Recommended values for dual oscillator are
+	 * ENT_DLY(25000), FRQMIN(5000), FRQMAX(10000).
+	 */
+#define MX8ULP_ENT_DELAY 25000
+#define MX8ULP_RNG_FRQ_MIN 5000
+#define MX8ULP_RNG_FRQ_MAX 10000
+
+	io_caam_write32(baseaddr + TRNG_SDCTL,
+			TRNG_SDCTL_ENT_DLY(MX8ULP_ENT_DELAY) |
+				TRNG_SDCTL_SAMP_SIZE(512));
+
+	io_caam_write32(baseaddr + TRNG_FRQMIN, MX8ULP_RNG_FRQ_MIN);
+
+	io_caam_write32(baseaddr + TRNG_FRQMAX, MX8ULP_RNG_FRQ_MAX);
+
+	val = io_caam_read32(baseaddr + RNG_OSC2_CTL);
+	/*
+	 * OSC2_CTL: Oscillator 2 Control Register
+	 * TRNG_ENT_CTL(1-0) = 00 : OSC1 default
+	 *                     01 : dual oscillator mode
+	 * Set the dual oscillator mode in OSC2_CTL.
+	 */
+	val |= RNG_OSC2_CTL_TRNG_ENT_CTL;
+	io_caam_write32(baseaddr + RNG_OSC2_CTL, val);
+#else
 	/*
 	 * Configure the RNG Entropy Delay
 	 * Performance-wise, it does not make sense to
@@ -119,6 +151,7 @@ enum caam_status caam_hal_rng_kick(vaddr_t baseaddr, uint32_t inc_delay)
 
 	/* max. freq. count, equal to 16 times the entropy sample length */
 	io_caam_write32(baseaddr + TRNG_FRQMAX, ent_delay << 4);
+#endif
 
 	io_caam_write32(baseaddr + TRNG_RTSCMISC,
 			TRNG_RTSCMISC_RTY_CNT(2) | TRNG_RTSCMISC_LRUN_MAX(32));
diff --git a/core/drivers/crypto/caam/hal/common/hal_sm.c b/core/drivers/crypto/caam/hal/common/hal_sm.c
index 04552c26a..87e1651e0 100644
--- a/core/drivers/crypto/caam/hal/common/hal_sm.c
+++ b/core/drivers/crypto/caam/hal/common/hal_sm.c
@@ -101,13 +101,8 @@ void caam_hal_sm_set_access_group(vaddr_t jr_base, unsigned int partition,
 	if (!jr_base)
 		return;
 
-	if (grp1 != UINT32_MAX)
-		io_caam_write32(jr_base + SM_SMAG1(partition),
-				SHIFT_U32(1, grp1));
-
-	if (grp2 != UINT32_MAX)
-		io_caam_write32(jr_base + SM_SMAG2(partition),
-				SHIFT_U32(1, grp2));
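+	/* grp1/grp2 are now full SMAG1/SMAG2 bitmasks, written as-is */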
+	io_caam_write32(jr_base + SM_SMAG1(partition), grp1);
+	io_caam_write32(jr_base + SM_SMAG2(partition), grp2);
 }
 
 void caam_hal_sm_open_access_perm(vaddr_t jr_base, unsigned int partition)
diff --git a/core/drivers/crypto/caam/hal/common/hal_sm_dt.c b/core/drivers/crypto/caam/hal/common/hal_sm_dt.c
new file mode 100644
index 000000000..b142c0432
--- /dev/null
+++ b/core/drivers/crypto/caam/hal/common/hal_sm_dt.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2019 NXP
+ *
+ * Brief   CAAM Secure Memory Hardware Abstraction Layer.
+ */
+
+#include <caam_common.h>
+#include <caam_hal_sm.h>
+#include <kernel/dt.h>
+#include <libfdt.h>
+
+static const char *dt_sm_match_table = {
+	"fsl,imx6q-caam-sm",
+};
+
+void caam_hal_sm_get_base_dt(void *fdt, vaddr_t *sm_base)
+{
+	int node = 0;
+	int ret = 0;
+	size_t size = 0;
+
+	*sm_base = 0;
+
+	node = fdt_node_offset_by_compatible(fdt, 0, dt_sm_match_table);
+
+	if (node < 0) {
+		HAL_TRACE("CAAM Node not found err = 0x%X", node);
+		return;
+	}
+
+	/* Map the device in the system if not already present */
+	ret = dt_map_dev(fdt, node, sm_base, &size, DT_MAP_AUTO);
+	if (ret < 0) {
+		HAL_TRACE("Cannot map node 0x%X", node);
+		return;
+	}
+}
diff --git a/core/drivers/crypto/caam/hal/common/registers/rng_regs.h b/core/drivers/crypto/caam/hal/common/registers/rng_regs.h
index 260fef5f5..c2439de4b 100644
--- a/core/drivers/crypto/caam/hal/common/registers/rng_regs.h
+++ b/core/drivers/crypto/caam/hal/common/registers/rng_regs.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-2-Clause */
 /*
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2019, 2024 NXP
  *
  * Brief   Random Number Generator Registers.
  */
@@ -118,4 +118,13 @@
 #define RNG_STA_PR0			BIT32(4)
 #define RNG_STA_PR1			BIT32(5)
 
+/*
+ * RNG Oscillator 2 Control Register
+ */
+#define RNG_OSC2_CTL 0x06EC
+
+/*
+ * TRNG_ENT_CTL(1-0) = 01 : dual oscillator mode
+ */
+#define RNG_OSC2_CTL_TRNG_ENT_CTL 0x1
 #endif /* __RNG_REGS_H__ */
diff --git a/core/drivers/crypto/caam/hal/common/sub.mk b/core/drivers/crypto/caam/hal/common/sub.mk
index dbab332a7..0b6c4d3f2 100644
--- a/core/drivers/crypto/caam/hal/common/sub.mk
+++ b/core/drivers/crypto/caam/hal/common/sub.mk
@@ -2,7 +2,7 @@ incdirs-y += ../../include
 incdirs-y += ../$(CAAM_HAL_DIR)
 incdirs-y += .
 
-srcs-$(CFG_DT) += hal_cfg_dt.c
+srcs-$(CFG_DT) += hal_cfg_dt.c hal_sm_dt.c
 srcs-y += hal_cfg.c
 srcs-y += hal_rng.c
 srcs-y += hal_jr.c
diff --git a/core/drivers/crypto/caam/hal/imx_8m/hal_cfg.c b/core/drivers/crypto/caam/hal/imx_8m/hal_cfg.c
new file mode 100644
index 000000000..8009ff2c6
--- /dev/null
+++ b/core/drivers/crypto/caam/hal/imx_8m/hal_cfg.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2020-2021 NXP
+ *
+ * Brief   CAAM Configuration.
+ */
+#include <caam_hal_cfg.h>
+#include <caam_hal_jr.h>
+#include <kernel/dt.h>
+#include <registers/jr_regs.h>
+
+void caam_hal_cfg_hab_jr_mgmt(struct caam_jrcfg *jrcfg)
+{
+	void *fdt = NULL;
+	struct caam_jrcfg tmp_jrcfg = {
+		.offset = (CFG_JR_HAB_INDEX + 1) * JRX_BLOCK_SIZE,
+	};
+
+	fdt = get_dt();
+	if (fdt) {
+		/* Ensure the HAB Job Ring is secure-only in the DTB */
+		caam_hal_cfg_disable_jobring_dt(fdt, &tmp_jrcfg);
+	}
+
+	caam_hal_jr_prepare_backup(jrcfg->base, tmp_jrcfg.offset);
+}
+
+bool caam_hal_cfg_is_hab_jr(paddr_t jr_offset)
+{
+	unsigned int jr_idx = JRX_IDX(jr_offset);
+
+	return jr_idx == CFG_JR_HAB_INDEX;
+}
diff --git a/core/drivers/crypto/caam/hal/imx_8m/sub.mk b/core/drivers/crypto/caam/hal/imx_8m/sub.mk
index 5231cce66..d8c98585a 100644
--- a/core/drivers/crypto/caam/hal/imx_8m/sub.mk
+++ b/core/drivers/crypto/caam/hal/imx_8m/sub.mk
@@ -5,3 +5,4 @@ incdirs-y += .
 srcs-y += hal_clk.c
 srcs-y += hal_ctrl.c
 srcs-y += hal_jr.c
+srcs-y += hal_cfg.c
diff --git a/core/drivers/crypto/caam/include/caam_ae.h b/core/drivers/crypto/caam/include/caam_ae.h
new file mode 100644
index 000000000..e02fcc9d0
--- /dev/null
+++ b/core/drivers/crypto/caam/include/caam_ae.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2024 NXP
+ */
+#ifndef __CAAM_AE_H__
+#define __CAAM_AE_H__
+
+#include <caam_common.h>
+
+/*
+ * Initialize the Authenticated Encryption (AE) module
+ *
+ * @ctrl_addr   Controller base address
+ */
+enum caam_status caam_ae_init(vaddr_t ctrl_addr __unused);
+
+#endif /* __CAAM_AE_H__ */
diff --git a/core/drivers/crypto/caam/include/caam_desc_ccb_defines.h b/core/drivers/crypto/caam/include/caam_desc_ccb_defines.h
index 8bfd512ce..c6970278a 100644
--- a/core/drivers/crypto/caam/include/caam_desc_ccb_defines.h
+++ b/core/drivers/crypto/caam/include/caam_desc_ccb_defines.h
@@ -13,8 +13,13 @@
 
 /* CCB Clear Written Register */
 #define CLR_WR_IFIFO_NFIFO BIT32(31)
+#define CLR_WR_RST_C1_CHA  BIT32(29)
 #define CLR_WR_RST_C2_CHA  BIT32(28)
+#define CLR_WR_RST_C1_DNE  BIT32(27)
+#define CLR_WR_RST_C2_CTX  BIT32(21)
 #define CLR_WR_RST_C2_DSZ  BIT32(18)
+#define CLR_WR_RST_C1_DSZ  BIT32(2)
+#define CLR_WR_RST_C1_MDE  BIT32(0)
 
 /* CCB NFIFO */
 #define NFIFO_CLASS(cla)       SHIFT_U32(NFIFO_CLASS_##cla & 0x3, 30)
@@ -31,6 +36,7 @@
 #define NFIFO_STYPE_PAD        0x2
 
 #define NFIFO_DTYPE(data)      SHIFT_U32(NFIFO_DTYPE_##data & 0xF, 20)
+#define NFIFO_DTYPE_AAD        0x1
 #define NFIFO_DTYPE_MSG        0xF
 #define NFIFO_DTYPE_PKHA_N     0x8
 #define NFIFO_DTYPE_PKHA_A     0xC
diff --git a/core/drivers/crypto/caam/include/caam_desc_defines.h b/core/drivers/crypto/caam/include/caam_desc_defines.h
index 370388afa..805f840bc 100644
--- a/core/drivers/crypto/caam/include/caam_desc_defines.h
+++ b/core/drivers/crypto/caam/include/caam_desc_defines.h
@@ -136,6 +136,7 @@
  * FIFO LOAD Command fields
  */
 #define CMD_FIFO_LOAD_TYPE	CMD_TYPE(0x04)
+#define CMD_SEQ_FIFO_LOAD_TYPE  CMD_TYPE(0x05)
 
 /* Extended Length */
 #define FIFO_LOAD_EXT		BIT32(22)
@@ -317,6 +318,7 @@
 #define PROT_RSA_FMT(format)	SHIFT_U32((PROT_RSA_FMT_##format) & 0x1, 12)
 #define PROT_RSA_FMT_NO		0
 #define PROT_RSA_FMT_PKCS_V1_5	1
+#define PROT_RSA_FMT_MASK SHIFT_U32(0x1, 12)
 
 #define PROT_RSA_DEC_KEYFORM(format)	SHIFT_U32(((format) - 1) & 0x3, 0)
 
diff --git a/core/drivers/crypto/caam/include/caam_desc_helper.h b/core/drivers/crypto/caam/include/caam_desc_helper.h
index 7c1d25587..854487c82 100644
--- a/core/drivers/crypto/caam/include/caam_desc_helper.h
+++ b/core/drivers/crypto/caam/include/caam_desc_helper.h
@@ -33,6 +33,8 @@ void caam_desc_add_dmaobj(uint32_t *desc, struct caamdmaobj *data,
 	caam_desc_add_dmaobj(desc, data, ST_NOIMM(cla, src, 0))
 #define caam_desc_fifo_store(desc, data, src)                                  \
 	caam_desc_add_dmaobj(desc, data, FIFO_ST(CLASS_NO, src, 0))
+#define caam_desc_seq_in(desc, data)                                           \
+	caam_desc_add_dmaobj(desc, data, SEQ_IN_PTR(0))
 #define caam_desc_seq_out(desc, data)                                          \
 	caam_desc_add_dmaobj(desc, data, SEQ_OUT_PTR(0))
 
@@ -202,6 +204,13 @@ static inline void dump_desc(uint32_t *desc)
 	(CMD_FIFO_LOAD_TYPE | CMD_IMM | CMD_CLASS(cla) |                       \
 	 FIFO_LOAD_INPUT(dst) | FIFO_LOAD_ACTION(act) | FIFO_LOAD_LENGTH(len))
 
+/*
+ * SEQ FIFO Load from register src of length len
+ */
+#define FIFO_LD_SEQ(src, len)                                                  \
+	(CMD_SEQ_FIFO_LOAD_TYPE | FIFO_LOAD_INPUT(src) |                       \
+	 FIFO_LOAD_LENGTH(len))
+
 /*
  * Store value of length len from register src of class cla
  */
diff --git a/core/drivers/crypto/caam/include/caam_hal_cfg.h b/core/drivers/crypto/caam/include/caam_hal_cfg.h
index 2d805620a..4d4205734 100644
--- a/core/drivers/crypto/caam/include/caam_hal_cfg.h
+++ b/core/drivers/crypto/caam/include/caam_hal_cfg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-2-Clause */
 /*
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2021 NXP
  *
  * Brief   CAAM Configuration header.
  */
@@ -27,6 +27,20 @@ enum caam_status caam_hal_cfg_get_conf(struct caam_jrcfg *jrcfg);
  */
 void caam_hal_cfg_setup_nsjobring(struct caam_jrcfg *jrcfg);
 
+/*
+ * Remove the JR used by the HAB from the DTB and back up its DID
+ *
+ * @jrcfg   Job Ring configuration of HAB JR
+ */
+void caam_hal_cfg_hab_jr_mgmt(struct caam_jrcfg *jrcfg);
+
+/*
+ * Indicate if the job ring is used by the HAB
+ *
+ * @jr_offset   Job Ring offset
+ */
+bool caam_hal_cfg_is_hab_jr(paddr_t jr_offset);
+
 #ifdef CFG_DT
 /*
  * Returns the Job Ring configuration to be used by the TEE
diff --git a/core/drivers/crypto/caam/include/caam_trace.h b/core/drivers/crypto/caam/include/caam_trace.h
index 7e1bc646e..e202c6256 100644
--- a/core/drivers/crypto/caam/include/caam_trace.h
+++ b/core/drivers/crypto/caam/include/caam_trace.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-2-Clause */
 /*
- * Copyright 2019-2021, 2023 NXP
+ * Copyright 2019-2021, 2023-2024 NXP
  *
  * Brief   CAAM driver trace include file.
  *         Definition of the internal driver trace macros.
@@ -43,6 +43,7 @@
 #define DBG_TRACE_MP	 BIT32(15) /* MP trace */
 #define DBG_TRACE_SM	 BIT32(16) /* Secure Memory trace */
 #define DBG_TRACE_KEY	 BIT32(17) /* KEY trace */
+#define DBG_TRACE_AE	 BIT32(18) /* AE trace */
 
 /* HAL */
 #if CAAM_DBG_TRACE(HAL)
@@ -354,4 +355,27 @@
 #define SM_TRACE(...)
 #endif
 
+/* Cipher AE */
+#if CAAM_DBG_TRACE(AE)
+#define AE_TRACE DRV_TRACE
+#if CAAM_DBG_DESC(AE)
+#define AE_DUMPDESC(desc)                                              \
+	do {                                                           \
+		AE_TRACE("AE Descriptor");                             \
+		DRV_DUMPDESC(desc);                                    \
+	} while (0)
+#else
+#define AE_DUMPDESC(desc)
+#endif
+#if CAAM_DBG_BUF(AE)
+#define AE_DUMPBUF DRV_DUMPBUF
+#else
+#define AE_DUMPBUF(...)
+#endif
+#else
+#define AE_TRACE(...)
+#define AE_DUMPDESC(desc)
+#define AE_DUMPBUF(...)
+#endif
+
 #endif /* CAAM_TRACE_H__ */
diff --git a/core/drivers/crypto/caam/include/caam_utils_mem.h b/core/drivers/crypto/caam/include/caam_utils_mem.h
index cb7ce2c77..989e4c646 100644
--- a/core/drivers/crypto/caam/include/caam_utils_mem.h
+++ b/core/drivers/crypto/caam/include/caam_utils_mem.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-2-Clause */
 /*
- * Copyright 2018-2021 NXP
+ * Copyright 2018-2021, 2024 NXP
  *
  * Brief   Memory management utilities.
  *         Primitive to allocate, free memory.
@@ -95,7 +95,7 @@ void caam_free_buf(struct caambuf *buf);
 
 /*
  * Copy source data into the block buffer. Allocate block buffer if
- * it's not defined.
+ * it's not already allocated.
  *
  * @block  [in/out] Block buffer information. Return buffer filled.
  * @src    Source to copy
@@ -104,6 +104,17 @@ void caam_free_buf(struct caambuf *buf);
 enum caam_status caam_cpy_block_src(struct caamblock *block,
 				    struct caambuf *src, size_t offset);
 
+/*
+ * Copy source data into the buffer. Allocate buffer if
+ * it's not already allocated.
+ *
+ * @dst         [out] Destination data to allocate and fill
+ * @src_data    Source to copy
+ * @src_length  Length to copy
+ */
+enum caam_status caam_cpy_buf_src(struct caambuf *dst, uint8_t *src_data,
+				  size_t src_length);
+
 /*
  * Return the number of Physical Areas used by the buffer @buf.
  * If @pabufs is not NULL, function fills it with the Physical Areas used
diff --git a/core/drivers/crypto/caam/sub.mk b/core/drivers/crypto/caam/sub.mk
index 4906dfed2..7ca62774c 100644
--- a/core/drivers/crypto/caam/sub.mk
+++ b/core/drivers/crypto/caam/sub.mk
@@ -12,6 +12,7 @@ srcs-$(CFG_NXP_CAAM_SM_DRV) += caam_sm.c
 srcs-y += caam_key.c
 subdirs-$(call cfg-one-enabled, CFG_NXP_CAAM_HASH_DRV CFG_NXP_CAAM_HMAC_DRV) += hash
 subdirs-$(call cfg-one-enabled, CFG_NXP_CAAM_CIPHER_DRV CFG_NXP_CAAM_CMAC_DRV) += cipher
+subdirs-$(call cfg-one-enabled, CFG_NXP_CAAM_AE_CCM_DRV CFG_NXP_CAAM_AE_GCM_DRV) += ae
 subdirs-y += acipher
 subdirs-y += blob
 subdirs-$(CFG_NXP_CAAM_MP_DRV) += mp
diff --git a/core/drivers/crypto/caam/utils/utils_mem.c b/core/drivers/crypto/caam/utils/utils_mem.c
index e7c23cdee..db3a32a27 100644
--- a/core/drivers/crypto/caam/utils/utils_mem.c
+++ b/core/drivers/crypto/caam/utils/utils_mem.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
- * Copyright 2018-2021 NXP
+ * Copyright 2018-2021, 2024 NXP
  *
  * Brief   Memory management utilities.
  *         Primitive to allocate, free memory.
@@ -12,6 +12,8 @@
 #include <io.h>
 #include <kernel/cache_helpers.h>
 #include <mm/core_memprot.h>
+#include <tee/cache.h>
+#include <tee/entry_std.h>
 #include <string.h>
 
 #define MEM_TYPE_NORMAL 0      /* Normal allocation */
@@ -178,7 +180,7 @@ void caam_free_buf(struct caambuf *buf)
 bool caam_mem_is_cached_buf(void *buf, size_t size)
 {
 	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;
-	bool is_cached = false;
+	bool is_cached = true;
 
 	/*
 	 * First check if the buffer is a known memory area mapped
@@ -187,10 +189,11 @@ bool caam_mem_is_cached_buf(void *buf, size_t size)
 	 * it cacheable
 	 */
 	mtype = core_mmu_get_type_by_pa(virt_to_phys(buf));
-	if (mtype == MEM_AREA_MAXTYPE)
-		is_cached = true;
-	else
+
+	if (mtype != MEM_AREA_MAXTYPE)
 		is_cached = core_vbuf_is(CORE_MEM_CACHED, buf, size);
+	else if (core_vbuf_is(CORE_MEM_SDP_MEM, buf, size))
+		is_cached = tee_entry_is_sdp_cached();
 
 	return is_cached;
 }
@@ -230,6 +233,35 @@ end_cpy:
 	return ret;
 }
 
+enum caam_status caam_cpy_buf_src(struct caambuf *dst, uint8_t *src_data,
+				  size_t src_length)
+{
+	enum caam_status ret = CAAM_FAILURE;
+
+	if (!src_data || !dst)
+		return CAAM_FAILURE;
+
+	if (!src_length)
+		return CAAM_NO_ERROR;
+
+	if (!dst->data) {
+		/* Allocate the destination buffer */
+		ret = caam_alloc_align_buf(dst, src_length);
+		if (ret != CAAM_NO_ERROR) {
+			MEM_TRACE("Allocation buffer error");
+			return ret;
+		}
+	}
+
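+	/*
+	 * Copy dst->length bytes: when the buffer was allocated above this
+	 * equals src_length; a caller-provided buffer is assumed to be no
+	 * larger than the source data.
+	 */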
+	/* Do the copy */
+	memcpy(dst->data, src_data, dst->length);
+
+	/* Push data to physical memory */
+	cache_operation(TEE_CACHECLEAN, dst->data, dst->length);
+
+	return CAAM_NO_ERROR;
+}
+
 int caam_mem_get_pa_area(struct caambuf *buf, struct caambuf **out_pabufs)
 {
 	int nb_pa_area = 0;
diff --git a/core/drivers/crypto/ele/acipher/ecc.c b/core/drivers/crypto/ele/acipher/ecc.c
new file mode 100644
index 000000000..7fedb84c5
--- /dev/null
+++ b/core/drivers/crypto/ele/acipher/ecc.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright NXP 2023
+ */
+
+#include <drivers/ele/ele.h>
+#include <drivers/ele/key_mgmt.h>
+#include <drivers/ele/key_store.h>
+#include <drivers/ele/sign_verify.h>
+#include <drivers/ele/utils_mem.h>
+#include <drvcrypt.h>
+#include <drvcrypt_acipher.h>
+#include <ecc.h>
+#include <string.h>
+#include <tee/cache.h>
+#include <tee/tee_cryp_utl.h>
+#include <tee_api_defines_extensions.h>
+#include <utee_defines.h>
+#include <util.h>
+
+static uint32_t algo_tee2ele(uint32_t algo)
+{
+	switch (algo) {
+	case TEE_ALG_ECDSA_P224:
+		return ELE_ALGO_ECDSA_SHA224;
+	case TEE_ALG_ECDSA_P256:
+		return ELE_ALGO_ECDSA_SHA256;
+	case TEE_ALG_ECDSA_P384:
+		return ELE_ALGO_ECDSA_SHA384;
+	case TEE_ALG_ECDSA_P521:
+		return ELE_ALGO_ECDSA_SHA512;
+	default:
+		EMSG("algorithm %#" PRIx32 " not enabled", algo);
+		return ELE_ALGO_ECDSA_NOT_SUPPORTED;
+	}
+}
+
+static TEE_Result do_shared_secret(struct drvcrypt_secret_data *sdata __unused)
+{
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+
+static TEE_Result get_key_size(uint32_t curve, size_t *key_size_bits)
+{
+	if (!key_size_bits) {
+		EMSG("Key size is not valid");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	switch (curve) {
+	case TEE_ECC_CURVE_NIST_P224:
+		*key_size_bits = 224;
+		break;
+	case TEE_ECC_CURVE_NIST_P256:
+		*key_size_bits = 256;
+		break;
+	case TEE_ECC_CURVE_NIST_P384:
+		*key_size_bits = 384;
+		break;
+	case TEE_ECC_CURVE_NIST_P521:
+		*key_size_bits = 521;
+		break;
+	default:
+		return TEE_ERROR_NOT_SUPPORTED;
+	}
+
+	return TEE_SUCCESS;
+}
+
+static TEE_Result get_permitted_algo(uint32_t curve, uint32_t *permitted_algo)
+{
+	if (!permitted_algo) {
+		EMSG("permitted_algo is not valid");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	switch (curve) {
+	case TEE_ECC_CURVE_NIST_P224:
+		*permitted_algo = ELE_ALGO_ECDSA_SHA224;
+		break;
+	case TEE_ECC_CURVE_NIST_P256:
+		*permitted_algo = ELE_ALGO_ECDSA_SHA256;
+		break;
+	case TEE_ECC_CURVE_NIST_P384:
+		*permitted_algo = ELE_ALGO_ECDSA_SHA384;
+		break;
+	case TEE_ECC_CURVE_NIST_P521:
+		*permitted_algo = ELE_ALGO_ECDSA_SHA512;
+		break;
+	default:
+		return TEE_ERROR_NOT_SUPPORTED;
+	}
+
+	return TEE_SUCCESS;
+}
+
+static TEE_Result do_gen_keypair(struct ecc_keypair *key,
+				 size_t size_bits __unused)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t key_mgmt_handle = 0;
+	uint32_t key_id = 0;
+	size_t key_size = 0;
+	size_t key_size_bits = 0;
+	size_t public_key_size = 0;
+	uint32_t key_store_handle = 0;
+	uint32_t permitted_algo = 0;
+	uint8_t *public_key = NULL;
+
+	if (!key) {
+		EMSG("key is not valid");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	res = get_permitted_algo(key->curve, &permitted_algo);
+	if (res != TEE_SUCCESS) {
+		EMSG("Curve not supported");
+		return res;
+	}
+
+	res = get_key_size(key->curve, &key_size_bits);
+	if (res != TEE_SUCCESS) {
+		EMSG("Curve not supported");
+		return res;
+	}
+
+	key_size = ROUNDUP_DIV(key_size_bits, 8);
+	public_key_size = key_size * 2;
+
+	public_key = calloc(1, public_key_size);
+	if (!public_key) {
+		EMSG("Public key allocation failed");
+		return TEE_ERROR_OUT_OF_MEMORY;
+	}
+
+	res = imx_ele_get_global_key_store_handle(&key_store_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Getting key store handle failed");
+		goto out;
+	}
+
+	res = imx_ele_key_mgmt_open(key_store_handle, &key_mgmt_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key management open failed");
+		goto out;
+	}
+
+	res = imx_ele_generate_key(key_mgmt_handle, public_key_size,
+				   ELE_KEY_GROUP_VOLATILE, false, false,
+				   ELE_KEY_LIFETIME_VOLATILE,
+				   ELE_KEY_USAGE_SIGN_HASH |
+				   ELE_KEY_USAGE_VERIFY_HASH |
+				   ELE_KEY_USAGE_SIGN_MSG |
+				   ELE_KEY_USAGE_VERIFY_MSG,
+				   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1,
+				   key_size_bits, permitted_algo,
+				   ELE_KEY_LIFECYCLE_DEVICE,
+				   public_key, &key_id);
+
+	res |= imx_ele_key_mgmt_close(key_mgmt_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key generation/Key management close failed");
+		goto out;
+	}
+
+	crypto_bignum_bin2bn(public_key, key_size, key->x);
+	crypto_bignum_bin2bn(public_key + key_size, key_size, key->y);
+
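+	/*
+	 * The private scalar never leaves the ELE: keep the 32-bit key
+	 * identifier in d so that do_sign() can retrieve the key later.
+	 */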
+	crypto_bignum_bin2bn((uint8_t *)&key_id, sizeof(key_id), key->d);
+
+out:
+	free(public_key);
+	return res;
+}
+
+static TEE_Result do_sign(struct drvcrypt_sign_data *sdata)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t key_id = 0;
+	uint32_t sig_gen_handle = 0;
+	uint32_t sig_scheme = 0;
+	size_t signature_len = 0;
+	struct ecc_keypair *key = NULL;
+	uint32_t key_store_handle = 0;
+
+	if (!sdata) {
+		EMSG("sdata is not valid");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	if (!sdata->key || !sdata->message.data || !sdata->signature.data ||
+	    !sdata->message.length) {
+		EMSG("Invalid key, message or signature pointer");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	signature_len = sdata->size_sec * 2;
+	key = sdata->key;
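+	/*
+	 * key->d is expected to carry the 32-bit ELE key identifier set by
+	 * do_gen_keypair(), not an actual private scalar.
+	 */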
+	if (crypto_bignum_num_bytes(key->d) != sizeof(uint32_t))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	sig_scheme = algo_tee2ele(sdata->algo);
+	if (sig_scheme == ELE_ALGO_ECDSA_NOT_SUPPORTED) {
+		EMSG("Signature scheme not supported");
+		return TEE_ERROR_NOT_SUPPORTED;
+	}
+
+	crypto_bignum_bn2bin(key->d, (uint8_t *)&key_id);
+
+	res = imx_ele_get_global_key_store_handle(&key_store_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Getting key store handle failed");
+		return res;
+	}
+
+	res = imx_ele_sig_gen_open(key_store_handle, &sig_gen_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature generation service flow open failed");
+		return res;
+	}
+
+	res = imx_ele_signature_generate(sig_gen_handle, key_id,
+					 sdata->message.data,
+					 sdata->message.length,
+					 sdata->signature.data,
+					 signature_len,
+					 sig_scheme,
+					 ELE_SIG_GEN_MSG_TYPE_MESSAGE);
+
+	res |= imx_ele_sig_gen_close(sig_gen_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature generation/Signature flow close failed");
+		goto out;
+	}
+
+	sdata->signature.length = signature_len;
+
+out:
+	return res;
+}
+
+static TEE_Result do_verify(struct drvcrypt_sign_data *sdata)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t sig_verify_handle = 0;
+	uint32_t sig_scheme = 0;
+	size_t key_size_bits = 0;
+	size_t key_size = 0;
+	struct ecc_public_key *key = NULL;
+	size_t public_key_size = 0;
+	uint32_t session_handle = 0;
+	uint8_t *public_key = NULL;
+
+	if (!sdata) {
+		EMSG("sdata is not valid");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	if (!sdata->key || !sdata->message.data || !sdata->signature.data ||
+	    !sdata->message.length || !sdata->signature.length) {
+		EMSG("Invalid key, message or signature pointer");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	key = sdata->key;
+
+	res = get_key_size(key->curve, &key_size_bits);
+	if (res != TEE_SUCCESS) {
+		EMSG("Curve not supported");
+		return res;
+	}
+
+	sig_scheme = algo_tee2ele(sdata->algo);
+	if (sig_scheme == ELE_ALGO_ECDSA_NOT_SUPPORTED) {
+		EMSG("Signature scheme not supported");
+		return TEE_ERROR_NOT_SUPPORTED;
+	}
+
+	key_size = ROUNDUP_DIV(key_size_bits, 8);
+	public_key_size = key_size * 2;
+
+	public_key = calloc(1, public_key_size);
+	if (!public_key) {
+		EMSG("Public key allocation failed");
+		return TEE_ERROR_OUT_OF_MEMORY;
+	}
+
+	crypto_bignum_bn2bin(key->x, public_key);
+	crypto_bignum_bn2bin(key->y, public_key + key_size);
+
+	res = imx_ele_get_global_session_handle(&session_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Getting global session handle failed");
+		goto out;
+	}
+
+	res = imx_ele_sig_verify_open(session_handle, &sig_verify_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature verification service flow open failed");
+		goto out;
+	}
+
+	res = imx_ele_signature_verification(sig_verify_handle,
+					     public_key,
+					     sdata->message.data,
+					     sdata->message.length,
+					     sdata->signature.data,
+					     sdata->signature.length,
+					     public_key_size, key_size_bits,
+					     ELE_KEY_TYPE_ECC_PUB_KEY_SECP_R1,
+					     sig_scheme,
+					     ELE_SIG_GEN_MSG_TYPE_MESSAGE);
+
+	res |= imx_ele_sig_verify_close(sig_verify_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Signature verif/Signature verif flow close failed");
+
+out:
+	free(public_key);
+	return res;
+}
+
+static TEE_Result do_allocate_keypair(struct ecc_keypair *key,
+				      size_t size_bits)
+{
+	if (!key) {
+		EMSG("key is not valid");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	/* Initialize the key fields to NULL */
+	memset(key, 0, sizeof(*key));
+
+	/* Allocate Secure Scalar */
+	key->d = crypto_bignum_allocate(size_bits);
+	if (!key->d)
+		goto out;
+
+	/* Allocate Public coordinate X */
+	key->x = crypto_bignum_allocate(size_bits);
+	if (!key->x)
+		goto out;
+
+	/* Allocate Public coordinate Y */
+	key->y = crypto_bignum_allocate(size_bits);
+	if (!key->y)
+		goto out;
+
+	return TEE_SUCCESS;
+
+out:
+	crypto_bignum_free(key->d);
+	crypto_bignum_free(key->x);
+
+	return TEE_ERROR_OUT_OF_MEMORY;
+}
+
+static TEE_Result do_allocate_publickey(struct ecc_public_key *key,
+					size_t size_bits)
+{
+	if (!key) {
+		EMSG("key is not valid");
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	/* Initialize the key fields to NULL */
+	memset(key, 0, sizeof(*key));
+
+	/* Allocate Public coordinate X */
+	key->x = crypto_bignum_allocate(size_bits);
+	if (!key->x)
+		goto out;
+
+	/* Allocate Public coordinate Y */
+	key->y = crypto_bignum_allocate(size_bits);
+	if (!key->y)
+		goto out;
+
+	return TEE_SUCCESS;
+
+out:
+	crypto_bignum_free(key->x);
+
+	return TEE_ERROR_OUT_OF_MEMORY;
+}
+
+static void do_free_publickey(struct ecc_public_key *s)
+{
+	if (!s)
+		return;
+
+	crypto_bignum_free(s->x);
+	crypto_bignum_free(s->y);
+}
+
+static struct drvcrypt_ecc driver_ecc = {
+	.alloc_keypair = do_allocate_keypair,
+	.alloc_publickey = do_allocate_publickey,
+	.free_publickey = do_free_publickey,
+	.gen_keypair = do_gen_keypair,
+	.sign = do_sign,
+	.verify = do_verify,
+	.shared_secret = do_shared_secret,
+};
+
+TEE_Result imx_ele_ecc_init(void)
+{
+	return drvcrypt_register_ecc(&driver_ecc);
+}
diff --git a/core/drivers/crypto/ele/acipher/sub.mk b/core/drivers/crypto/ele/acipher/sub.mk
new file mode 100644
index 000000000..2ec64ece3
--- /dev/null
+++ b/core/drivers/crypto/ele/acipher/sub.mk
@@ -0,0 +1,3 @@
+incdirs-y += ../include
+
+srcs-$(CFG_IMX_ELE_ECC_DRV) += ecc.c
diff --git a/core/drivers/crypto/ele/crypto.mk b/core/drivers/crypto/ele/crypto.mk
new file mode 100644
index 000000000..da1c354f8
--- /dev/null
+++ b/core/drivers/crypto/ele/crypto.mk
@@ -0,0 +1,20 @@
+ifeq ($(CFG_IMX_ELE),y)
+CFG_IMX_ELE_ECC_DRV ?= n
+CFG_IMX_ELE_ACIPHER_DRV ?= $(CFG_IMX_ELE_ECC_DRV)
+
+# If the i.MX ELE driver is supported, the crypto driver interfacing
+# it with the generic crypto API can be enabled.
+CFG_CRYPTO_DRIVER ?= $(CFG_IMX_ELE_ACIPHER_DRV)
+
+ifeq ($(CFG_CRYPTO_DRIVER),y)
+CFG_CRYPTO_DRIVER_DEBUG ?= 0
+CFG_CRYPTO_DRV_ECC ?= $(CFG_IMX_ELE_ECC_DRV)
+CFG_CRYPTO_DRV_ACIPHER ?= $(CFG_IMX_ELE_ACIPHER_DRV)
+endif # CFG_CRYPTO_DRIVER
+
+# Issues in the ELE FW prevent OP-TEE and the normal world kernel from
+# using the RNG concurrently at runtime. To prevent any issue, use the
+# software RNG in OP-TEE instead.
+CFG_WITH_SOFTWARE_PRNG ?= y
+
+endif # CFG_IMX_ELE
diff --git a/core/drivers/crypto/ele/ele.c b/core/drivers/crypto/ele/ele.c
new file mode 100644
index 000000000..651a5daa8
--- /dev/null
+++ b/core/drivers/crypto/ele/ele.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2022-2023 NXP
+ */
+#include <drivers/ele_extension.h>
+#include <drivers/ele/ele.h>
+#include <drivers/ele/key_store.h>
+#include <drivers/ele/sign_verify.h>
+#include <drivers/imx_mu.h>
+#include <ecc.h>
+#include <initcall.h>
+#include <kernel/boot.h>
+#include <kernel/delay.h>
+#include <kernel/panic.h>
+#include <kernel/tee_common_otp.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <rng_support.h>
+#include <stdint.h>
+#include <string_ext.h>
+#include <tee/cache.h>
+#include <tee_api_defines.h>
+#include <trace.h>
+#include <types_ext.h>
+#include <utee_types.h>
+#include <util.h>
+#include <utils_trace.h>
+
+#define ELE_BASE_ADDR MU_BASE
+#define ELE_BASE_SIZE MU_SIZE
+
+#define ELE_COMMAND_SUCCEED 0xd6
+
+#define ELE_CMD_SESSION_OPEN	    0x10
+#define ELE_CMD_SESSION_CLOSE	    0x11
+#define ELE_CMD_RNG_GET		    0xCD
+#define ELE_CMD_TRNG_STATE	    0xA4
+#define ELE_CMD_GET_INFO	    0xDA
+#define ELE_CMD_DERIVE_KEY	    0xA9
+#define ELE_CMD_SAB_INIT	    0x17
+
+#define IMX_ELE_TRNG_STATUS_READY 0x3
+#define IMX_ELE_CSAL_STATUS_READY 0x2
+
+#define ELE_MU_IRQ 0x0
+
+#define CACHELINE_SIZE 64
+
+register_phys_mem_pgdir(MEM_AREA_IO_SEC, MU_BASE, MU_SIZE);
+
+struct get_info_rsp {
+	uint32_t rsp_code;
+	uint16_t soc_id;
+	uint16_t soc_rev;
+	uint16_t lifecycle;
+	uint8_t sssm_state;
+	uint8_t unused_1;
+	uint32_t uid[4];
+	uint32_t sha256_rom_patch[8];
+	uint32_t sha256_firmware[8];
+	uint32_t oem_srkh[16];
+	uint8_t trng_state;
+	uint8_t csal_state;
+	uint8_t imem_state;
+	uint8_t unused_2;
+} __packed;
+
+/*
+ * The CRC for the message is computed xor-ing all the words of the message:
+ * the header and all the words except the word storing the CRC.
+ *
+ * @msg MU message to hash
+ */
+static uint32_t compute_crc(const struct imx_mu_msg *msg)
+{
+	uint32_t crc = 0;
+	uint8_t i = 0;
+	uint32_t *payload = (uint32_t *)msg;
+
+	assert(msg);
+
+	for (i = 0; i < msg->header.size - 1; i++)
+		crc ^= payload[i];
+
+	return crc;
+}
+
+void update_crc(struct imx_mu_msg *msg)
+{
+	assert(msg);
+	/*
+	 * The CRC field is the last element of the data array. The header
+	 * word is not part of the array, hence the size - 2 index.
+	 */
+	msg->data.u32[msg->header.size - 2] = compute_crc(msg);
+}
+
+/*
+ * Return the given MU base address, depending on the MMU state.
+ *
+ * @pa MU physical base address
+ * @sz MU size
+ */
+static vaddr_t imx_ele_init(paddr_t pa, size_t sz)
+{
+	static bool is_initialized;
+	vaddr_t va = 0;
+
+	assert(pa && sz);
+
+	if (cpu_mmu_enabled())
+		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
+	else
+		va = (vaddr_t)pa;
+
+	if (!is_initialized) {
+		imx_mu_init(va);
+		is_initialized = true;
+	}
+
+	return va;
+}
+
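+/*
+ * Split an ELE response word into its fields:
+ * bits [31:16] rating extension, bits [15:8] rating, bits [7:0] status.
+ */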
+struct response_code get_response_code(uint32_t word)
+{
+	struct response_code rsp = {
+		.rating_extension = (word & GENMASK_32(31, 16)) >> 16,
+		.rating = (word & GENMASK_32(15, 8)) >> 8,
+		.status = (word & GENMASK_32(7, 0)) >> 0,
+	};
+
+	return rsp;
+}
+
+enum ele_status {
+	ELE_GENERAL_ERROR = 0x00,
+	ELE_INVALID_ADDRESS = 0x02,
+	ELE_UNKNOWN_IDENTIFIER,
+	ELE_INVALID_ARGUMENT,
+	ELE_NVM_ERROR,
+	ELE_OUT_OF_MEMORY,
+	ELE_UNKNOWN_HANDLE,
+	ELE_KEY_STORE_AUTH_FAILED = 0x09,
+	ELE_IDENTIFIER_CONFLICT = 0x0B,
+	ELE_UNSUPPORTED_COMMAND = 0x0D,
+	ELE_KEYSTORE_CONFLICT = 0x0F,
+	ELE_NO_SPACE_IN_KEY_STORE = 0x19,
+	ELE_OUTPUT_BUFFER_SHORT = 0x1D,
+	ELE_CRC_ERROR = 0xB9,
+};
+
+static TEE_Result ele_status_to_tee_result(uint32_t word)
+{
+	struct response_code rsp_code = {};
+
+	rsp_code = get_response_code(word);
+	if (rsp_code.status == ELE_COMMAND_SUCCEED)
+		return TEE_SUCCESS;
+
+	switch (rsp_code.rating) {
+	case ELE_OUT_OF_MEMORY:
+	case ELE_NO_SPACE_IN_KEY_STORE:
+		return TEE_ERROR_OUT_OF_MEMORY;
+	case ELE_INVALID_ARGUMENT:
+		return TEE_ERROR_BAD_PARAMETERS;
+	case ELE_UNKNOWN_HANDLE:
+	case ELE_UNKNOWN_IDENTIFIER:
+		return TEE_ERROR_ITEM_NOT_FOUND;
+	case ELE_OUTPUT_BUFFER_SHORT:
+		return TEE_ERROR_SHORT_BUFFER;
+	case ELE_UNSUPPORTED_COMMAND:
+		return TEE_ERROR_NOT_SUPPORTED;
+	default:
+		break;
+	}
+	return TEE_ERROR_GENERIC;
+}
+
+TEE_Result imx_ele_call(struct imx_mu_msg *msg)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	vaddr_t va = 0;
+
+	assert(msg);
+
+	if (msg->header.tag != ELE_REQUEST_TAG) {
+		EMSG("Request has invalid tag: %#"PRIx8" instead of %#"PRIx8,
+		     msg->header.tag, ELE_REQUEST_TAG);
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	va = imx_ele_init(ELE_BASE_ADDR, ELE_BASE_SIZE);
+	if (!va) {
+		EMSG("Fail to get base address");
+		return TEE_ERROR_GENERIC;
+	}
+
+	ele_trace_print_msg(*msg);
+
+	res = imx_mu_call(va, msg, true);
+	if (res) {
+		EMSG("Failed to transmit message: %#" PRIx32, res);
+		return res;
+	}
+
+	if (msg->header.tag != ELE_RESPONSE_TAG) {
+		EMSG("Response has invalid tag: %#" PRIx8
+		     " instead of %#" PRIx8,
+		     msg->header.tag, ELE_RESPONSE_TAG);
+		return TEE_ERROR_GENERIC;
+	}
+
+	ele_trace_print_msg(*msg);
+
+	return ele_status_to_tee_result(msg->data.u32[0]);
+}
+
+/*
+ * Initialize EdgeLock Enclave services
+ */
+static TEE_Result __maybe_unused imx_ele_sab_init(void)
+{
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = 1,
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_SAB_INIT,
+	};
+
+	return imx_ele_call(&msg);
+}
+
+TEE_Result imx_ele_session_open(uint32_t *session_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct open_session_cmd {
+		uint8_t rsvd1;
+		uint8_t interrupt_num;
+		uint16_t rsvd2;
+		uint8_t priority;
+		uint8_t op_mode;
+		uint16_t rsvd3;
+	} __packed cmd = {
+		.rsvd1 = 0,
+		.interrupt_num = ELE_MU_IRQ,
+		.rsvd2 = 0,
+		.priority = 0,
+		.op_mode = 0,
+		.rsvd3 = 0,
+	};
+	struct open_session_rsp {
+		uint32_t rsp_code;
+		uint32_t session_handle;
+	} rsp = { };
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_SESSION_OPEN,
+	};
+
+	assert(session_handle);
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	res = imx_ele_call(&msg);
+	if (res)
+		return res;
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	*session_handle = rsp.session_handle;
+
+	return TEE_SUCCESS;
+}
+
+TEE_Result imx_ele_session_close(uint32_t session_handle)
+{
+	struct close_session_cmd {
+		uint32_t session_handle;
+	} cmd = {
+		.session_handle = session_handle,
+	};
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_SESSION_CLOSE,
+	};
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	return imx_ele_call(&msg);
+}
+
+static TEE_Result imx_ele_get_device_info(struct get_info_rsp *rsp)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct imx_ele_buf output = {};
+	struct {
+		uint32_t addr_msb;
+		uint32_t addr_lsb;
+		uint16_t size;
+	} __packed cmd = {};
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_BASELINE,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_GET_INFO,
+	};
+
+	if (!rsp)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = imx_ele_buf_alloc(&output, NULL, sizeof(*rsp));
+	if (res)
+		goto out;
+
+	cmd.addr_msb = output.paddr_msb;
+	cmd.addr_lsb = output.paddr_lsb;
+	cmd.size = sizeof(*rsp);
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	res = imx_ele_call(&msg);
+	if (res)
+		goto out;
+
+	res = imx_ele_buf_copy(&output, (uint8_t *)rsp, sizeof(*rsp));
+out:
+	imx_ele_buf_free(&output);
+
+	return res;
+}
+
+int tee_otp_get_die_id(uint8_t *buffer, size_t len)
+{
+	static uint32_t uid[4];
+	static bool is_fetched;
+	struct get_info_rsp rsp = {};
+
+	assert(buffer && len);
+
+	if (is_fetched)
+		goto out;
+
+	if (imx_ele_get_device_info(&rsp))
+		goto err;
+
+	memcpy(uid, rsp.uid, MIN(sizeof(rsp.uid), len));
+
+	is_fetched = true;
+out:
+	memcpy(buffer, uid, MIN(sizeof(uid), len));
+
+	return 0;
+err:
+	panic("Fail to get the device UID");
+}
+
+TEE_Result imx_ele_get_global_session_handle(uint32_t *session_handle)
+{
+	static uint32_t imx_ele_session_handle;
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	if (!session_handle)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (imx_ele_session_handle) {
+		res = TEE_SUCCESS;
+		goto out;
+	}
+
+	res = imx_ele_session_open(&imx_ele_session_handle);
+	if (res) {
+		EMSG("Failed to open global session");
+		return res;
+	}
+
+out:
+	*session_handle = imx_ele_session_handle;
+	return res;
+}
+
+static TEE_Result imx_ele_global_init(void)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	res = imx_ele_sab_init();
+	if (res) {
+		EMSG("Failed to initialize Edgelock Enclave services");
+		goto err;
+	}
+
+	res = imx_ele_ecc_init();
+	if (res)
+		EMSG("ELE ECC driver registration failed");
+
+err:
+	return res;
+}
+driver_init(imx_ele_global_init);
+
+#if defined(CFG_MX93) || defined(CFG_MX91) || defined(CFG_MX95)
+/*
+ * The key buffer pointer must be aligned on a cache line
+ * because a cache invalidate is done after key derivation.
+ * Key derivation may target a secure on-chip RAM buffer to
+ * prevent the secret key from leaking into DDR, so we cannot
+ * use a temporarily allocated aligned imx_ele_buf to derive
+ * the key: it would expose the derived key in DDR.
+ */
+TEE_Result imx_ele_derive_key(const uint8_t *ctx, size_t ctx_size, uint8_t *key,
+			      size_t key_size)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t msb = 0;
+	uint32_t lsb = 0;
+	paddr_t pa = 0;
+	struct key_derive_cmd {
+		uint32_t key_addr_msb;
+		uint32_t key_addr_lsb;
+		uint32_t ctx_addr_msb;
+		uint32_t ctx_addr_lsb;
+		uint16_t key_size;
+		uint16_t ctx_size;
+		uint32_t crc;
+	} __packed cmd = {};
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_BASELINE,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_DERIVE_KEY,
+	};
+	struct imx_ele_buf ele_ctx = {};
+
+	assert(ctx && key);
+
+	/*
+	 * As we do a cache invalidate on key we must ensure that the buffer
+	 * is aligned on a cache line
+	 */
+	if (!IS_ALIGNED((uintptr_t)key, CACHELINE_SIZE))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = imx_ele_buf_alloc(&ele_ctx, ctx, ctx_size);
+	if (res)
+		return res;
+
+	pa = virt_to_phys((void *)key);
+	/*
+	 * The ELE needs the address aligned on 4 bytes. The check is
+	 * needed because no intermediate copy can be done: the key
+	 * buffer is potentially allocated in OCRAM and must not be
+	 * exposed to DDR.
+	 */
+	if (!IS_ALIGNED_WITH_TYPE(pa, uint32_t)) {
+		EMSG("Key address is not aligned");
+		res = TEE_ERROR_BAD_PARAMETERS;
+		goto out;
+	}
+
+	/*
+	 * Intermediate msb and lsb values are needed. Directly using
+	 * key_addr_msb and key_addr_lsb might be unaligned because of the
+	 * __packed attribute of key_derive_cmd {}
+	 */
+	reg_pair_from_64((uint64_t)pa, &msb, &lsb);
+
+	cmd.key_addr_lsb = lsb;
+	cmd.key_addr_msb = msb;
+	cmd.key_size = key_size;
+
+	cmd.ctx_addr_lsb = ele_ctx.paddr_lsb;
+	cmd.ctx_addr_msb = ele_ctx.paddr_msb;
+	cmd.ctx_size = ctx_size;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&msg);
+
+	memzero_explicit(key, key_size);
+	cache_operation(TEE_CACHEFLUSH, (void *)key, key_size);
+
+	res = imx_ele_call(&msg);
+	if (res)
+		goto out;
+
+	cache_operation(TEE_CACHEINVALIDATE, (void *)key, key_size);
+out:
+	imx_ele_buf_free(&ele_ctx);
+
+	return res;
+}
+
+TEE_Result tee_otp_get_hw_unique_key(struct tee_hw_unique_key *hwkey)
+{
+	static const char pattern[] = "TEE_for_HUK_ELE";
+	static uint8_t key[HW_UNIQUE_KEY_LENGTH] __aligned(CACHELINE_SIZE);
+	static bool is_fetched;
+
+	if (is_fetched)
+		goto out;
+
+	if (imx_ele_derive_key((const uint8_t *)pattern, sizeof(pattern), key,
+			       sizeof(key)))
+		panic("Fail to get HUK from ELE");
+
+	is_fetched = true;
+out:
+	memcpy(hwkey->data, key,
+	       MIN(sizeof(key), (size_t)HW_UNIQUE_KEY_LENGTH));
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Get the current state of the ELE TRNG
+ */
+static TEE_Result imx_ele_rng_get_trng_state(void)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct rng_get_trng_state_msg_rsp {
+		uint32_t rsp_code;
+		uint8_t trng_state;
+		uint8_t csal_state;
+	} __packed rsp = { };
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_BASELINE,
+		.header.size = 1,
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_TRNG_STATE,
+	};
+
+	res = imx_ele_call(&msg);
+	if (res)
+		return res;
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	if (rsp.trng_state != IMX_ELE_TRNG_STATUS_READY ||
+	    rsp.csal_state != IMX_ELE_CSAL_STATUS_READY)
+		return TEE_ERROR_BUSY;
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Get random data from the EdgeLock Enclave.
+ *
+ * This function can be called when the MMU is off or on.
+ * Virtual/physical address translation and cache maintenance
+ * are performed if needed.
+ *
+ * @buffer: data output
+ * @size: RNG data size
+ */
+static TEE_Result imx_ele_rng_get_random(uint8_t *buffer, size_t size)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct imx_ele_buf rng = {};
+
+	struct rng_get_random_cmd {
+		uint32_t addr_msb;
+		uint32_t addr_lsb;
+		uint32_t size;
+	} cmd = { };
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_RNG_GET,
+	};
+
+	if (!buffer || !size)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (cpu_mmu_enabled()) {
+		res = imx_ele_buf_alloc(&rng, NULL, size);
+		if (res != TEE_SUCCESS)
+			return res;
+
+		cmd.addr_msb = rng.paddr_msb;
+		cmd.addr_lsb = rng.paddr_lsb;
+	} else {
+		paddr_t pa = (paddr_t)buffer;
+
+		if (!IS_ALIGNED_WITH_TYPE(pa, uint32_t))
+			return TEE_ERROR_BAD_PARAMETERS;
+
+		reg_pair_from_64((uint64_t)pa, &cmd.addr_msb, &cmd.addr_lsb);
+	}
+
+	cmd.size = (uint32_t)size;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	res = imx_ele_call(&msg);
+	if (res)
+		goto out;
+
+	if (cpu_mmu_enabled())
+		res = imx_ele_buf_copy(&rng, buffer, size);
+out:
+	imx_ele_buf_free(&rng);
+
+	return res;
+}
+
+unsigned long plat_get_aslr_seed(void)
+{
+	uint64_t timeout = timeout_init_us(10 * 1000);
+	unsigned long aslr __aligned(CACHELINE_SIZE) = 0;
+
+	/*
+	 * This function can only be called when the MMU is off. No
+	 * virtual/physical address translation is performed, nor cache
+	 * maintenance.
+	 */
+	assert(!cpu_mmu_enabled());
+
+	/*
+	 * Check the current TRNG state of the ELE. The TRNG must be
+	 * started with a command earlier in the boot to allow the TRNG
+	 * to generate enough entropy.
+	 */
+	while (imx_ele_rng_get_trng_state() == TEE_ERROR_BUSY)
+		if (timeout_elapsed(timeout))
+			panic("ELE RNG is busy");
+
+	if (imx_ele_rng_get_random((uint8_t *)&aslr, sizeof(aslr)))
+		panic("Cannot retrieve random data from ELE");
+
+	return aslr;
+}
+
+#ifndef CFG_WITH_SOFTWARE_PRNG
+TEE_Result hw_get_random_bytes(void *buf, size_t len)
+{
+	return imx_ele_rng_get_random((uint8_t *)buf, len);
+}
+#endif /* CFG_WITH_SOFTWARE_PRNG */
+#else
+TEE_Result imx_ele_derive_key(const uint8_t *ctx __unused,
+			      size_t ctx_size __unused, uint8_t *key __unused,
+			      size_t key_size __unused)
+{
+	return TEE_ERROR_NOT_IMPLEMENTED;
+}
+#endif /* CFG_MX93 || CFG_MX91 || CFG_MX95 */
diff --git a/core/drivers/crypto/ele/fuse.c b/core/drivers/crypto/ele/fuse.c
new file mode 100644
index 000000000..98e666838
--- /dev/null
+++ b/core/drivers/crypto/ele/fuse.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+#include <drivers/ele_extension.h>
+#include <drivers/ele/ele.h>
+#include <drivers/ele/utils_mem.h>
+#include <initcall.h>
+#include <mm/core_memprot.h>
+#include <stdint.h>
+#include <string.h>
+
+#ifdef CFG_IMX_OCOTP
+#error "CFG_IMX_OCOTP and CFG_IMX_ELE are exclusive"
+#endif
+
+#define ELE_CMD_READ_COMMON 0x97
+#define ELE_CMD_READ_SHADOW 0xF3
+
+struct ele_instance {
+	unsigned int nb_banks;
+	unsigned int nb_words;
+	bool (*fuse_map)(unsigned int fuse_index);
+};
+
+static const struct ele_instance *g_ele;
+
+/*
+ * Read fuse value.
+ *
+ * @fuse_index: fuse id
+ * @fuse_value: fuse value
+ * @command:	ELE read fuse command
+ */
+static TEE_Result imx_ele_read_fuse(unsigned int fuse_index,
+				    uint32_t *fuse_value, uint8_t command)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	struct read_fuse_msg_cmd {
+		uint32_t fuse_index;
+	} cmd = {
+		.fuse_index = fuse_index,
+	};
+	struct read_fuse_rsp {
+		uint32_t rsp_code;
+		uint32_t fuse_value;
+	} rsp = {};
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_BASELINE,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = command,
+	};
+
+	assert(fuse_value);
+
+	/* The fuse index is only 16 bits wide for Read Common fuses */
+	if (command == ELE_CMD_READ_COMMON && cmd.fuse_index > UINT16_MAX)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to read fuse res = %" PRIx32, res);
+		return res;
+	}
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	*fuse_value = rsp.fuse_value;
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * ELE fuse map for imx8ulp
+ *
+ * @fuse_index: fuse id
+ *
+ * Return true if the fuse id is supported by the ELE Read Common fuse
+ * command, which is used to read non-security-related fuses.
+ */
+static bool imx8ulp_ele_common_fuse_map(unsigned int fuse_index)
+{
+	switch (fuse_index) {
+	case 8 ... 23:
+	case 192 ... 224:
+	case 256 ... 295:
+	case 392 ... 415:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * ELE fuse map for imx93
+ *
+ * @fuse_index: fuse id
+ *
+ * Return true if the fuse id is supported by the ELE Read Common fuse
+ * command, which is used to read non-security-related fuses.
+ */
+static bool imx93_ele_common_fuse_map(unsigned int fuse_index)
+{
+	switch (fuse_index) {
+	case 24 ... 34:
+	case 63:
+	case 128 ... 144:
+	case 182:
+	case 188:
+		return true;
+	default:
+		return false;
+	}
+}
+
+TEE_Result imx_ocotp_read(unsigned int bank, unsigned int word,
+			  uint32_t *fuse_value)
+{
+	unsigned int fuse_index = 0;
+
+	if (!g_ele || !g_ele->fuse_map)
+		return TEE_ERROR_NOT_SUPPORTED;
+
+	if (!fuse_value)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (bank > g_ele->nb_banks || word > g_ele->nb_words)
+		return TEE_ERROR_BAD_PARAMETERS;
+
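+	/* e.g. with 8 words per bank, bank 3 word 2 maps to fuse index 26 */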
+	fuse_index = bank * g_ele->nb_words + word;
+
+	if (g_ele->fuse_map(fuse_index))
+		return imx_ele_read_fuse(fuse_index, fuse_value,
+					 ELE_CMD_READ_COMMON);
+	else
+		return imx_ele_read_fuse(fuse_index, fuse_value,
+					 ELE_CMD_READ_SHADOW);
+}
+
+static const struct ele_instance ele_imx93 = {
+	.nb_banks = 64,
+	.nb_words = 8,
+	.fuse_map = imx93_ele_common_fuse_map,
+};
+
+static const struct ele_instance ele_imx8ulp = {
+	.nb_banks = 64,
+	.nb_words = 8,
+	.fuse_map = imx8ulp_ele_common_fuse_map,
+};
+
+static TEE_Result imx_ele_fuse_init(void)
+{
+	switch (imx_soc_type()) {
+	case SOC_MX8ULP:
+		g_ele = &ele_imx8ulp;
+		break;
+	case SOC_MX93:
+	case SOC_MX91:
+		g_ele = &ele_imx93;
+		break;
+	default:
+		g_ele = NULL;
+		return TEE_ERROR_NOT_SUPPORTED;
+	}
+
+	return TEE_SUCCESS;
+}
+driver_init(imx_ele_fuse_init);
diff --git a/core/drivers/crypto/ele/include/ecc.h b/core/drivers/crypto/ele/include/ecc.h
new file mode 100644
index 000000000..5989ac97d
--- /dev/null
+++ b/core/drivers/crypto/ele/include/ecc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ *
+ * Brief   ELE ECC driver TEE Crypto integration.
+ */
+#ifndef __ECC_H__
+#define __ECC_H__
+
+#include <tee_api_types.h>
+
+#ifdef CFG_IMX_ELE_ECC_DRV
+/*
+ * Initialize the ECC module
+ */
+TEE_Result imx_ele_ecc_init(void);
+#else
+static inline TEE_Result imx_ele_ecc_init(void)
+{
+	return TEE_SUCCESS;
+}
+#endif /* CFG_IMX_ELE_ECC_DRV */
+
+#endif /* __ECC_H__ */
diff --git a/core/drivers/crypto/ele/include/utils_trace.h b/core/drivers/crypto/ele/include/utils_trace.h
new file mode 100644
index 000000000..13c8a4086
--- /dev/null
+++ b/core/drivers/crypto/ele/include/utils_trace.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+#ifndef __UTILS_TRACE_H_
+#define __UTILS_TRACE_H_
+
+#include <drivers/imx_mu.h>
+#include <stddef.h>
+
+/*
+ * Dump ELE request/response message
+ *
+ * @msg ELE MU message
+ */
+void ele_trace_print_msg(struct imx_mu_msg msg);
+
+#endif /* __UTILS_TRACE_H_ */
diff --git a/core/drivers/crypto/ele/key_mgmt.c b/core/drivers/crypto/ele/key_mgmt.c
new file mode 100644
index 000000000..1489d3114
--- /dev/null
+++ b/core/drivers/crypto/ele/key_mgmt.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+#include <drivers/ele/ele.h>
+#include <drivers/ele/key_mgmt.h>
+#include <drivers/ele/utils_mem.h>
+#include <mm/core_memprot.h>
+#include <string.h>
+
+#define ELE_CMD_KEY_MGMT_OPEN  0x40
+#define ELE_CMD_KEY_MGMT_CLOSE 0x41
+#define ELE_CMD_GENERATE_KEY   0x42
+#define ELE_CMD_DELETE_KEY     0x4E
+
+/*
+ * Open a key management session with EdgeLock Enclave.
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle
+ * @key_mgmt_handle: EdgeLock Enclave Key management handle
+ */
+TEE_Result imx_ele_key_mgmt_open(uint32_t key_store_handle,
+				 uint32_t *key_mgmt_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct key_mgmt_open_cmd {
+		uint32_t key_store_handle;
+		uint32_t msbi;
+		uint32_t msbo;
+		uint8_t flags;
+		uint8_t reserved[3];
+		uint32_t crc;
+	} __packed cmd = {
+		.key_store_handle = key_store_handle,
+		.msbi = 0,
+		.msbo = 0,
+		.flags = 0,
+		.reserved = {},
+		.crc = 0,
+	};
+
+	struct key_mgmt_open_msg_rsp {
+		uint32_t rsp_code;
+		uint32_t key_mgmt_handle;
+	} rsp = {};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_KEY_MGMT_OPEN,
+	};
+
+	if (!key_mgmt_handle)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&msg);
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to open key management service flow");
+		return res;
+	}
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	*key_mgmt_handle = rsp.key_mgmt_handle;
+
+	return TEE_SUCCESS;
+}
+
+TEE_Result imx_ele_key_mgmt_close(uint32_t key_mgmt_handle)
+{
+	struct key_mgmt_close_cmd {
+		uint32_t key_mgmt_handle;
+	} cmd = {
+		.key_mgmt_handle = key_mgmt_handle,
+	};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_KEY_MGMT_CLOSE,
+	};
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	return imx_ele_call(&msg);
+}
+
+TEE_Result imx_ele_generate_key(uint32_t key_mgmt_handle,
+				size_t public_key_size, uint16_t key_group,
+				bool sync, bool mon_inc, uint32_t key_lifetime,
+				uint32_t key_usage, uint16_t key_type,
+				size_t key_size, uint32_t permitted_algo,
+				uint32_t key_lifecycle,
+				uint8_t *public_key_addr,
+				uint32_t *key_identifier)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	struct imx_mu_msg msg = {};
+	struct imx_ele_buf public_key = {};
+	struct gen_key_msg_cmd {
+		uint32_t key_mgmt_handle;
+		uint32_t key_id;
+		uint16_t public_key_size;
+		uint16_t key_group;
+		uint16_t key_type;
+		uint16_t key_size;
+		uint32_t key_lifetime;
+		uint32_t key_usage;
+		uint32_t permitted_algo;
+		uint32_t key_lifecycle;
+		uint8_t flags;
+		uint8_t reserved[3];
+		uint32_t public_key_addr;
+		uint32_t crc;
+	} __packed cmd = {};
+
+	struct gen_key_msg_rsp {
+		uint32_t rsp_code;
+		uint32_t key_identifier;
+		uint16_t pub_key_size;
+		uint16_t reserved;
+	} rsp = {};
+
+	if (!key_identifier || !public_key_addr)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	/* MONOTONIC counter increment flag can only be set with SYNC flag */
+	if (mon_inc && !sync)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = imx_ele_buf_alloc(&public_key, NULL, public_key_size);
+	if (res != TEE_SUCCESS) {
+		EMSG("Public key memory allocation failed");
+		return res;
+	}
+
+	cmd.key_mgmt_handle = key_mgmt_handle;
+	cmd.key_id = 0;
+	cmd.public_key_size = (uint16_t)public_key.size;
+	cmd.key_group = key_group;
+	cmd.key_type = key_type;
+	cmd.key_size = (uint16_t)key_size;
+	cmd.key_lifetime = key_lifetime;
+	cmd.key_usage = key_usage;
+	cmd.permitted_algo = permitted_algo;
+	cmd.key_lifecycle = key_lifecycle;
+	cmd.flags = (mon_inc ? IMX_ELE_FLAG_MON_INC : 0) |
+		    (sync ? IMX_ELE_FLAG_SYNC : 0);
+	cmd.public_key_addr = public_key.paddr;
+	cmd.crc = 0;
+
+	msg.header.version = ELE_VERSION_HSM;
+	msg.header.size = SIZE_MSG_32(cmd);
+	msg.header.tag = ELE_REQUEST_TAG;
+	msg.header.command = ELE_CMD_GENERATE_KEY;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&msg);
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to generate key res = %" PRIx32, res);
+		goto out;
+	}
+
+	res = imx_ele_buf_copy(&public_key, public_key_addr, public_key_size);
+	if (res != TEE_SUCCESS) {
+		EMSG("Public key copy failed");
+		goto out;
+	}
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	*key_identifier = rsp.key_identifier;
+
+out:
+	imx_ele_buf_free(&public_key);
+	return res;
+}
+
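+/*
+ * Delete a key managed by EdgeLock Enclave.
+ *
+ * @key_mgmt_handle: EdgeLock Enclave key management handle
+ * @key_identifier: Identifier of the key to delete
+ * @sync: Set the SYNC flag in the command
+ * @mon_inc: Set the monotonic counter increment flag
+ */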
+TEE_Result imx_ele_delete_key(uint32_t key_mgmt_handle, uint32_t key_identifier,
+			      bool sync, bool mon_inc)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	struct delete_key_msg_cmd {
+		uint32_t key_mgmt_handle;
+		uint32_t key_identifier;
+		uint16_t rsvd1;
+		uint8_t flags;
+		uint8_t rsvd2;
+	} __packed cmd = {
+		.key_mgmt_handle = key_mgmt_handle,
+		.key_identifier = key_identifier,
+		.rsvd1 = 0,
+		.flags = (mon_inc ? IMX_ELE_FLAG_MON_INC : 0) |
+			 (sync ? IMX_ELE_FLAG_SYNC : 0),
+		.rsvd2 = 0,
+	};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_DELETE_KEY,
+	};
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to delete key res = %" PRIx32, res);
+		return res;
+	}
+
+	return TEE_SUCCESS;
+}
diff --git a/core/drivers/crypto/ele/key_store.c b/core/drivers/crypto/ele/key_store.c
new file mode 100644
index 000000000..28bd4f118
--- /dev/null
+++ b/core/drivers/crypto/ele/key_store.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2022-2023 NXP
+ */
+#include <drivers/ele/ele.h>
+#include <drivers/ele/key_store.h>
+#include <stdbool.h>
+#include <string.h>
+
+#define ELE_CMD_KEY_STORE_OPEN	    0x30
+#define ELE_CMD_KEY_STORE_CLOSE	    0x31
+
+#define IMX_ELE_GLOBAL_KEY_STORE_ID   0x1234
+#define IMX_ELE_KEY_STORE_AUTH_NONCE  0x1234
+#define IMX_ELE_KEY_STORE_MAX_UPDATES 100
+
+#define IMX_ELE_KEY_STORE_FLAG_CREATE 0x01
+
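+/*
+ * Open (or create) a key store with EdgeLock Enclave.
+ *
+ * @session_handle: EdgeLock Enclave session handle
+ * @key_store_id: Identifier of the key store
+ * @auth_nonce: Authentication nonce of the key store
+ * @create: Create the key store instead of opening an existing one
+ * @mon_inc: Set the monotonic counter increment flag (requires @sync)
+ * @sync: Set the SYNC flag (requires @create)
+ * @key_store_handle: EdgeLock Enclave key store handle
+ */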
+TEE_Result imx_ele_key_store_open(uint32_t session_handle,
+				  uint32_t key_store_id, uint32_t auth_nonce,
+				  bool create, bool mon_inc, bool sync,
+				  uint32_t *key_store_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct key_store_open_cmd {
+		uint32_t session_handle;
+		uint32_t key_store_id;
+		uint32_t auth_nonce;
+		uint16_t rsvd1;
+		uint8_t flags;
+		uint8_t rsvd2;
+		uint32_t crc;
+	} __packed cmd = {
+		.session_handle = session_handle,
+		.key_store_id = key_store_id,
+		.auth_nonce = auth_nonce,
+		.rsvd1 = 0,
+		.flags = (create ? IMX_ELE_KEY_STORE_FLAG_CREATE : 0) |
+			 (mon_inc ? IMX_ELE_FLAG_MON_INC : 0) |
+			 (sync ? IMX_ELE_FLAG_SYNC : 0),
+		.rsvd2 = 0,
+		.crc = 0,
+	};
+
+	struct key_store_open_msg_rsp {
+		uint32_t rsp_code;
+		uint32_t key_store_handle;
+	} rsp = {};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_KEY_STORE_OPEN,
+	};
+
+	if (!key_store_handle)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	/* SYNC flag can only be set with CREATE flag */
+	/* MONOTONIC counter increment flag can only be set with SYNC flag */
+	if ((sync && !create) || (mon_inc && !sync))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&msg);
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to open key store");
+		return res;
+	}
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	*key_store_handle = rsp.key_store_handle;
+
+	return TEE_SUCCESS;
+}
+
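+/*
+ * Close a key store with EdgeLock Enclave.
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle to close
+ */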
+TEE_Result imx_ele_key_store_close(uint32_t key_store_handle)
+{
+	struct key_store_close_cmd {
+		uint32_t key_store_handle;
+	} cmd = {
+		.key_store_handle = key_store_handle,
+	};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_KEY_STORE_CLOSE,
+	};
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	return imx_ele_call(&msg);
+}
+
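+/*
+ * Return the global key store handle. The key store is opened on the first
+ * call and the handle is cached for subsequent calls.
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle
+ */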
+TEE_Result imx_ele_get_global_key_store_handle(uint32_t *key_store_handle)
+{
+	static uint32_t imx_ele_key_store_handle;
+	uint32_t imx_ele_session_handle = 0;
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	if (!key_store_handle)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (imx_ele_key_store_handle) {
+		res = TEE_SUCCESS;
+		goto out;
+	}
+
+	res = imx_ele_get_global_session_handle(&imx_ele_session_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to get global session handle");
+		return res;
+	}
+
+	res = imx_ele_key_store_open(imx_ele_session_handle,
+				     IMX_ELE_GLOBAL_KEY_STORE_ID,
+				     IMX_ELE_KEY_STORE_AUTH_NONCE, true, false,
+				     false, &imx_ele_key_store_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to open key store handle");
+		return res;
+	}
+
+out:
+	*key_store_handle = imx_ele_key_store_handle;
+	return res;
+}
diff --git a/core/drivers/crypto/ele/sign_verify.c b/core/drivers/crypto/ele/sign_verify.c
new file mode 100644
index 000000000..a1e3e1f62
--- /dev/null
+++ b/core/drivers/crypto/ele/sign_verify.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+#include <drivers/ele/ele.h>
+#include <drivers/ele/sign_verify.h>
+#include <drivers/ele/utils_mem.h>
+#include <string.h>
+
+#define ELE_CMD_SIG_GEN_OPEN	 0x70
+#define ELE_CMD_SIG_GEN_CLOSE	 0x71
+#define ELE_CMD_SIG_GENERATE	 0x72
+#define ELE_CMD_SIG_VERIFY_OPEN	 0x80
+#define ELE_CMD_SIG_VERIFY_CLOSE 0x81
+#define ELE_CMD_SIG_VERIFICATION 0x82
+
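+/*
+ * Open a signature generation flow with EdgeLock Enclave.
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle
+ * @sig_gen_handle: EdgeLock Enclave signature generation handle
+ */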
+TEE_Result imx_ele_sig_gen_open(uint32_t key_store_handle,
+				uint32_t *sig_gen_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	struct sig_gen_open_msg_cmd {
+		uint32_t key_store_handle;
+		uint32_t msbi;
+		uint32_t msbo;
+		uint8_t flags;
+		uint8_t rsvd[3];
+		uint32_t crc;
+	} __packed cmd = {
+		.key_store_handle = key_store_handle,
+		.msbi = 0,
+		.msbo = 0,
+		.flags = 0,
+		.rsvd = {},
+		.crc = 0,
+	};
+
+	struct sig_gen_open_msg_rsp {
+		uint32_t rsp_code;
+		uint32_t sig_gen_handle;
+	} rsp = {};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_SIG_GEN_OPEN,
+	};
+
+	if (!sig_gen_handle)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&msg);
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to open signature generation res = %" PRIx32, res);
+		return res;
+	}
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	*sig_gen_handle = rsp.sig_gen_handle;
+
+	return TEE_SUCCESS;
+}
+
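+/*
+ * Close a signature generation flow with EdgeLock Enclave.
+ *
+ * @sig_gen_handle: EdgeLock Enclave signature generation handle to close
+ */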
+TEE_Result imx_ele_sig_gen_close(uint32_t sig_gen_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	struct sig_gen_close_msg_cmd {
+		uint32_t sig_gen_handle;
+	} __packed cmd = {
+		.sig_gen_handle = sig_gen_handle,
+	};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_SIG_GEN_CLOSE,
+	};
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to close signature gen flow res = %" PRIx32, res);
+		return res;
+	}
+
+	return TEE_SUCCESS;
+}
+
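+/*
+ * Generate a signature over the given message with EdgeLock Enclave.
+ *
+ * @sig_gen_handle: EdgeLock Enclave signature generation handle
+ * @key_identifier: Identifier of the signing key
+ * @message: Message to sign
+ * @message_size: Size in bytes of the message
+ * @signature: Buffer filled with the generated signature
+ * @signature_size: Size in bytes of the signature buffer
+ * @signature_scheme: Signature scheme to use
+ * @message_type: Type of the message, passed in the command flags
+ */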
+TEE_Result imx_ele_signature_generate(uint32_t sig_gen_handle,
+				      uint32_t key_identifier,
+				      const uint8_t *message,
+				      size_t message_size, uint8_t *signature,
+				      size_t signature_size,
+				      uint32_t signature_scheme,
+				      uint8_t message_type)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct imx_ele_buf msg = {};
+	struct imx_ele_buf sig = {};
+	struct imx_mu_msg mu_msg = {};
+
+	struct signature_generate_msg_cmd {
+		uint32_t sig_gen_handle;
+		uint32_t key_identifier;
+		uint32_t message;
+		uint32_t signature;
+		uint32_t message_size;
+		uint16_t signature_size;
+		uint8_t flags;
+		uint8_t rsvd;
+		uint32_t signature_scheme;
+		uint32_t crc;
+	} __packed cmd = {};
+
+	if (!message || !signature || !message_size || !signature_size)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = imx_ele_buf_alloc(&msg, message, message_size);
+	if (res != TEE_SUCCESS) {
+		EMSG("Message memory allocation failed");
+		return res;
+	}
+
+	res = imx_ele_buf_alloc(&sig, NULL, signature_size);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature memory allocation failed");
+		goto out;
+	}
+
+	cmd.sig_gen_handle = sig_gen_handle;
+	cmd.key_identifier = key_identifier;
+	cmd.message = msg.paddr;
+	cmd.signature = sig.paddr;
+	cmd.message_size = (uint32_t)msg.size;
+	cmd.signature_size = (uint16_t)sig.size;
+	cmd.flags = message_type;
+	cmd.rsvd = 0;
+	cmd.signature_scheme = signature_scheme;
+	cmd.crc = 0;
+
+	mu_msg.header.version = ELE_VERSION_HSM;
+	mu_msg.header.size = SIZE_MSG_32(cmd);
+	mu_msg.header.tag = ELE_REQUEST_TAG;
+	mu_msg.header.command = ELE_CMD_SIG_GENERATE;
+
+	memcpy(mu_msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&mu_msg);
+
+	res = imx_ele_call(&mu_msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to generate signature res = %" PRIx32, res);
+		goto out;
+	}
+
+	res = imx_ele_buf_copy(&sig, signature, signature_size);
+	if (res != TEE_SUCCESS)
+		EMSG("Signature copy failed");
+
+out:
+	imx_ele_buf_free(&msg);
+	imx_ele_buf_free(&sig);
+	return res;
+}
+
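+/*
+ * Open a signature verification flow with EdgeLock Enclave.
+ *
+ * @session_handle: EdgeLock Enclave session handle
+ * @sig_verify_handle: EdgeLock Enclave signature verification handle
+ */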
+TEE_Result imx_ele_sig_verify_open(uint32_t session_handle,
+				   uint32_t *sig_verify_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	struct sig_verif_open_msg_cmd {
+		uint32_t session_handle;
+		uint32_t msbi;
+		uint32_t msbo;
+		uint8_t flags;
+		uint8_t rsvd[3];
+		uint32_t crc;
+	} __packed cmd = {
+		.session_handle = session_handle,
+		.msbi = 0,
+		.msbo = 0,
+		.flags = 0,
+		.rsvd = {},
+		.crc = 0,
+	};
+
+	struct sig_verif_open_msg_rsp {
+		uint32_t rsp_code;
+		uint32_t sig_verify_handle;
+	} rsp = {};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_SIG_VERIFY_OPEN,
+	};
+
+	if (!sig_verify_handle)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&msg);
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to open signature verif flow res = %" PRIx32, res);
+		return res;
+	}
+
+	memcpy(&rsp, msg.data.u8, sizeof(rsp));
+
+	*sig_verify_handle = rsp.sig_verify_handle;
+
+	return TEE_SUCCESS;
+}
+
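+/*
+ * Close a signature verification flow with EdgeLock Enclave.
+ *
+ * @sig_verify_handle: EdgeLock Enclave signature verification handle to close
+ */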
+TEE_Result imx_ele_sig_verify_close(uint32_t sig_verify_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	struct sig_verif_close_msg_cmd {
+		uint32_t sig_verify_handle;
+	} __packed cmd = {
+		.sig_verify_handle = sig_verify_handle,
+	};
+
+	struct imx_mu_msg msg = {
+		.header.version = ELE_VERSION_HSM,
+		.header.size = SIZE_MSG_32(cmd),
+		.header.tag = ELE_REQUEST_TAG,
+		.header.command = ELE_CMD_SIG_VERIFY_CLOSE,
+	};
+
+	memcpy(msg.data.u8, &cmd, sizeof(cmd));
+
+	res = imx_ele_call(&msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failed to close signature verif res = %" PRIx32, res);
+		return res;
+	}
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Convert the signature verification status returned by the ELE into
+ * a TEE_Result.
+ *
+ * @verification_status: Signature Verification status
+ */
+static TEE_Result imx_ele_sig_verify_status(uint32_t verification_status)
+{
+	switch (verification_status) {
+	case ELE_SIG_VERIFICATION_SUCCESS:
+		return TEE_SUCCESS;
+	case ELE_SIG_VERIFICATION_FAILURE:
+		return TEE_ERROR_SIGNATURE_INVALID;
+	default:
+		return TEE_ERROR_GENERIC;
+	}
+}
+
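+/*
+ * Verify a signature over the given message with EdgeLock Enclave.
+ *
+ * @sig_verify_handle: EdgeLock Enclave signature verification handle
+ * @key: Public key used for the verification
+ * @message: Message over which the signature is verified
+ * @message_size: Size in bytes of the message
+ * @signature: Signature to verify
+ * @signature_size: Size in bytes of the signature
+ * @key_size: Size in bytes of the public key
+ * @key_security_size: Security size of the key
+ * @key_type: Type of the public key
+ * @signature_scheme: Signature scheme to use
+ * @message_type: Type of the message, passed in the command flags
+ */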
+TEE_Result imx_ele_signature_verification(uint32_t sig_verify_handle,
+					  const uint8_t *key,
+					  const uint8_t *message,
+					  size_t message_size,
+					  const uint8_t *signature,
+					  size_t signature_size,
+					  size_t key_size,
+					  size_t key_security_size,
+					  uint16_t key_type,
+					  uint32_t signature_scheme,
+					  uint8_t message_type)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct imx_mu_msg mu_msg = {};
+	struct imx_ele_buf public_key = {};
+	struct imx_ele_buf msg = {};
+	struct imx_ele_buf sig = {};
+
+	struct signature_verification_msg_cmd {
+		uint32_t sig_verify_handle;
+		uint32_t key;
+		uint32_t message;
+		uint32_t signature;
+		uint32_t message_size;
+		uint16_t signature_size;
+		uint16_t key_size;
+		uint16_t key_security_size;
+		uint16_t key_type;
+		uint8_t flags;
+		uint8_t rsvd[3];
+		uint32_t signature_scheme;
+		uint32_t crc;
+	} __packed cmd = {};
+
+	struct signature_verification_msg_rsp {
+		uint32_t rsp_code;
+		uint32_t verification_status;
+	} rsp = {};
+
+	if (!key || !message || !signature || !key_size ||
+	    !message_size || !signature_size || !key_security_size)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = imx_ele_buf_alloc(&public_key, key, key_size);
+	if (res != TEE_SUCCESS) {
+		EMSG("Public key memory allocation failed");
+		return TEE_ERROR_OUT_OF_MEMORY;
+	}
+
+	res = imx_ele_buf_alloc(&msg, message, message_size);
+	if (res != TEE_SUCCESS) {
+		EMSG("Message memory allocation failed");
+		goto out;
+	}
+
+	res = imx_ele_buf_alloc(&sig, signature, signature_size);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature memory allocation failed");
+		goto out;
+	}
+
+	cmd.sig_verify_handle = sig_verify_handle;
+	cmd.key = public_key.paddr;
+	cmd.message = msg.paddr;
+	cmd.signature = sig.paddr;
+	cmd.message_size = (uint32_t)msg.size;
+	cmd.signature_size = (uint16_t)sig.size;
+	cmd.key_size = (uint16_t)public_key.size;
+	cmd.key_security_size = (uint16_t)key_security_size;
+	cmd.key_type = key_type;
+	cmd.flags = message_type;
+	cmd.signature_scheme = signature_scheme;
+	cmd.crc = 0;
+
+	mu_msg.header.version = ELE_VERSION_HSM;
+	mu_msg.header.size = SIZE_MSG_32(cmd);
+	mu_msg.header.tag = ELE_REQUEST_TAG;
+	mu_msg.header.command = ELE_CMD_SIG_VERIFICATION;
+
+	memcpy(mu_msg.data.u8, &cmd, sizeof(cmd));
+	update_crc(&mu_msg);
+
+	res = imx_ele_call(&mu_msg);
+	if (res != TEE_SUCCESS) {
+		EMSG("Failure in signature verification res = %" PRIx32, res);
+		goto out;
+	}
+
+	memcpy(&rsp, mu_msg.data.u8, sizeof(rsp));
+
+	res = imx_ele_sig_verify_status(rsp.verification_status);
+	if (res != TEE_SUCCESS)
+		EMSG("Signature Verification failed");
+
+out:
+	imx_ele_buf_free(&public_key);
+	imx_ele_buf_free(&msg);
+	imx_ele_buf_free(&sig);
+
+	return res;
+}
diff --git a/core/drivers/crypto/ele/sub.mk b/core/drivers/crypto/ele/sub.mk
new file mode 100644
index 000000000..b94d1b4bc
--- /dev/null
+++ b/core/drivers/crypto/ele/sub.mk
@@ -0,0 +1,10 @@
+incdirs-y += include
+
+srcs-y += ele.c
+srcs-y += utils_mem.c
+srcs-y += key_store.c
+srcs-y += key_mgmt.c
+srcs-y += fuse.c
+srcs-y += utils_trace.c
+srcs-y += sign_verify.c
+subdirs-$(CFG_IMX_ELE_ACIPHER_DRV) += acipher
diff --git a/core/drivers/crypto/ele/utils_mem.c b/core/drivers/crypto/ele/utils_mem.c
new file mode 100644
index 000000000..f851ce855
--- /dev/null
+++ b/core/drivers/crypto/ele/utils_mem.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+#include <drivers/ele/utils_mem.h>
+#include <io.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_memprot.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tee/cache.h>
+#include <trace.h>
+#include <util.h>
+
+/*
+ * Allocate cache-line-aligned memory of the given size in bytes.
+ * The size is also rounded up to the cache line size.
+ *
+ * @size   Size in bytes to allocate
+ */
+static void *imx_ele_calloc_align(size_t size)
+{
+	void *ptr = NULL;
+	size_t alloc_size = size;
+	size_t cacheline_size = dcache_get_line_size();
+
+	if (ROUNDUP_OVERFLOW(alloc_size, cacheline_size, &alloc_size))
+		return NULL;
+
+	ptr = memalign(cacheline_size, alloc_size);
+	if (!ptr) {
+		EMSG("alloc Error - NULL");
+		return NULL;
+	}
+
+	memset(ptr, 0, alloc_size);
+
+	return ptr;
+}
+
+/*
+ * Free allocated area
+ *
+ * @ptr  area to free
+ */
+static void imx_ele_free(void *ptr)
+{
+	if (ptr)
+		free(ptr);
+}
+
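+/*
+ * Perform a cache operation on the data buffer described by @ele_buf.
+ *
+ * @op: Cache operation to perform
+ * @ele_buf: ELE buffer descriptor
+ */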
+void imx_ele_buf_cache_op(enum utee_cache_operation op,
+			  struct imx_ele_buf *ele_buf)
+{
+	if (ele_buf && ele_buf->data)
+		cache_operation(op, ele_buf->data, ele_buf->size);
+}
+
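+/*
+ * Allocate a cache-aligned buffer shared with EdgeLock Enclave, optionally
+ * copy @buf into it and flush it to memory.
+ *
+ * @ele_buf: ELE buffer descriptor to populate
+ * @buf: Optional data to copy into the allocated buffer
+ * @size: Size in bytes of the buffer
+ */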
+TEE_Result imx_ele_buf_alloc(struct imx_ele_buf *ele_buf, const uint8_t *buf,
+			     size_t size)
+{
+	if (!ele_buf || !size)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	ele_buf->data = imx_ele_calloc_align(size);
+	if (!ele_buf->data) {
+		EMSG("buffer allocation failed");
+		return TEE_ERROR_OUT_OF_MEMORY;
+	}
+
+	ele_buf->paddr = virt_to_phys(ele_buf->data);
+	if (!ele_buf->paddr) {
+		imx_ele_free(ele_buf->data);
+		return TEE_ERROR_OUT_OF_MEMORY;
+	}
+
+	reg_pair_from_64((uint64_t)ele_buf->paddr, &ele_buf->paddr_msb,
+			 &ele_buf->paddr_lsb);
+
+	ele_buf->size = size;
+
+	if (buf)
+		memcpy(ele_buf->data, buf, size);
+
+	imx_ele_buf_cache_op(TEE_CACHEFLUSH, ele_buf);
+
+	return TEE_SUCCESS;
+}
+
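+/*
+ * Free the buffer described by @ele_buf and reset the descriptor.
+ *
+ * @ele_buf: ELE buffer descriptor
+ */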
+void imx_ele_buf_free(struct imx_ele_buf *ele_buf)
+{
+	if (ele_buf) {
+		imx_ele_free(ele_buf->data);
+		ele_buf->data = NULL;
+		ele_buf->paddr = 0;
+		ele_buf->size = 0;
+	}
+}
+
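+/*
+ * Invalidate the ELE buffer and copy its content into @buf.
+ *
+ * @ele_buf: ELE buffer descriptor
+ * @buf: Destination buffer
+ * @size: Size in bytes of the destination buffer
+ */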
+TEE_Result imx_ele_buf_copy(struct imx_ele_buf *ele_buf, uint8_t *buf,
+			    size_t size)
+{
+	if (!ele_buf || !buf || !size)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (size < ele_buf->size)
+		return TEE_ERROR_SHORT_BUFFER;
+
+	imx_ele_buf_cache_op(TEE_CACHEINVALIDATE, ele_buf);
+	memcpy(buf, ele_buf->data, ele_buf->size);
+
+	return TEE_SUCCESS;
+}
diff --git a/core/drivers/crypto/ele/utils_trace.c b/core/drivers/crypto/ele/utils_trace.c
new file mode 100644
index 000000000..fe8b43736
--- /dev/null
+++ b/core/drivers/crypto/ele/utils_trace.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+#include <assert.h>
+#include <drivers/ele/ele.h>
+#include <drivers/imx_mu.h>
+#include <stdint.h>
+#include <utils_trace.h>
+
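+/*
+ * Print the header and payload of the given MU message for debugging.
+ *
+ * @msg: MU message to print
+ */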
+void ele_trace_print_msg(struct imx_mu_msg msg)
+{
+	unsigned int i = 0;
+
+	DMSG("Header version %#" PRIx8 " size %#" PRIx8 " tag %#" PRIx8
+	     " command %#" PRIx8,
+	     msg.header.version, msg.header.size, msg.header.tag,
+	     msg.header.command);
+
+	/*
+	 * If the given message is a response message, the first 4 bytes of the
+	 * message are status codes.
+	 */
+	if (msg.header.tag == ELE_RESPONSE_TAG) {
+		struct response_code rsp __maybe_unused =
+			get_response_code(msg.data.u32[0]);
+
+		DMSG("Response status: %#" PRIx8 " rating: %#" PRIx8
+		     " rating extension %#" PRIx8,
+		     rsp.status, rsp.rating, rsp.rating_extension);
+	} else {
+		DMSG("Request:");
+	}
+
+	for (i = 0; i < msg.header.size; i++)
+		DMSG("\t[%u] %#010" PRIx32, i, msg.data.u32[i]);
+}
diff --git a/core/drivers/crypto/sub.mk b/core/drivers/crypto/sub.mk
index 71cb6bd69..1c6a69b6a 100644
--- a/core/drivers/crypto/sub.mk
+++ b/core/drivers/crypto/sub.mk
@@ -13,3 +13,5 @@ subdirs-$(CFG_ASPEED_CRYPTO_DRIVER) += aspeed
 subdirs-$(CFG_VERSAL_CRYPTO_DRIVER) += versal
 
 subdirs-$(CFG_HISILICON_CRYPTO_DRIVER) += hisilicon
+
+subdirs-$(CFG_IMX_ELE) += ele
diff --git a/core/drivers/gic.c b/core/drivers/gic.c
index 0913da6ec..e2bfa3f3b 100644
--- a/core/drivers/gic.c
+++ b/core/drivers/gic.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2016-2017, 2023-2024 Linaro Limited
  * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright 2021 NXP
  */
 
 #include <arm.h>
@@ -538,6 +539,18 @@ void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
 
 	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);
 
+#ifdef CFG_COCKPIT
+	/*
+	 * Check if the GICD is already configured. If so, do not touch it,
+	 * otherwise it would break the other partitions' interrupts.
+	 */
+	if (io_read32(gd->gicd_base + GICD_CTLR) &
+	    (GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN)) {
+		IMSG("GIC Distributor already configured: skip %s", __func__);
+		return;
+	}
+#endif
+
 #if defined(CFG_WITH_ARM_TRUSTED_FW)
 	/* GIC configuration is initialized from TF-A when embedded */
 	if (affinity_routing_is_enabled(gd)) {
diff --git a/core/drivers/imx/dcp/dcp.c b/core/drivers/imx/dcp/dcp.c
index 34324b903..a73b4fb53 100644
--- a/core/drivers/imx/dcp/dcp.c
+++ b/core/drivers/imx/dcp/dcp.c
@@ -697,12 +697,6 @@ static TEE_Result dcp_pbase(paddr_t *base)
 	if (fdt_get_status(fdt, node) == DT_STATUS_DISABLED)
 		return TEE_ERROR_ITEM_NOT_FOUND;
 
-	/* Force secure-status = "okay" and status="disabled" */
-	if (dt_enable_secure_status(fdt, node)) {
-		EMSG("Not able to set DCP Control DTB entry secure");
-		return TEE_ERROR_NOT_SUPPORTED;
-	}
-
 	*base = fdt_reg_base_address(fdt, node);
 	if (*base == DT_INFO_INVALID_REG) {
 		EMSG("Unable to get the DCP Base address");
diff --git a/core/drivers/imx/mu/imx_mu.c b/core/drivers/imx/mu/imx_mu.c
index 017b97a4f..86175b79b 100644
--- a/core/drivers/imx/mu/imx_mu.c
+++ b/core/drivers/imx/mu/imx_mu.c
@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
- * Copyright 2022 NXP
+ * Copyright 2022-2023 NXP
  */
 #include <assert.h>
 #include <drivers/imx_mu.h>
+#include <imx-regs.h>
 #include <kernel/delay.h>
 #include <kernel/spinlock.h>
 #include <string.h>
@@ -13,6 +14,12 @@
 
 #define RX_TIMEOUT (100 * 1000)
 
+#if defined(CFG_MX93) || defined(CFG_MX91)
+#define IS_MU_TRUST (MU_BASE == MU_TRUST_BASE)
+#else
+#define IS_MU_TRUST false
+#endif
+
 static unsigned int mu_spinlock = SPINLOCK_UNLOCK;
 
 __weak void imx_mu_plat_init(vaddr_t base __unused)
@@ -89,6 +96,8 @@ static TEE_Result imx_mu_send_msg(vaddr_t base, struct imx_mu_msg *msg)
 	TEE_Result res = TEE_ERROR_GENERIC;
 	unsigned int count = 0;
 	unsigned int nb_channel = 0;
+	unsigned int start_index = 0;
+	unsigned int end_index = 0;
 	uint32_t word = 0;
 
 	assert(base && msg);
@@ -98,16 +107,49 @@ static TEE_Result imx_mu_send_msg(vaddr_t base, struct imx_mu_msg *msg)
 		return TEE_ERROR_BAD_FORMAT;
 	}
 
-	memcpy(&word, &msg->header, sizeof(uint32_t));
-	res = imx_mu_plat_send(base, 0, word);
-	if (res)
-		return res;
+	if (IS_MU_TRUST) {
+		/*
+		 * Make sure the command field (bits [31:26]) is higher than
+		 * SCM_CR2_CMD_VAL. SCM_CR2_CMD_VAL is set to 0 by the ELE FW,
+		 * but use the maximum value to be safe.
+		 */
+		word |= GENMASK_32(31, 26);
 
-	nb_channel = imx_mu_plat_get_tx_channel(base);
+		/* size (including dummy header) -> bits [19:16] */
+		word |= SHIFT_U32(((msg->header.size + 1) & GENMASK_32(3, 0)),
+				  16);
 
-	for (count = 1; count < msg->header.size; count++) {
+		res = imx_mu_plat_send(base, start_index, word);
+		if (res)
+			return res;
+
+		start_index++;
+		memcpy(&word, &msg->header, sizeof(uint32_t));
+		res = imx_mu_plat_send(base, start_index, word);
+		if (res)
+			return res;
+
+		start_index++;
+		/*
+		 * TR15 is reserved for special USM commands
+		 */
+		nb_channel = imx_mu_plat_get_tx_channel(base) - 1;
+		end_index = msg->header.size + 1;
+		assert(end_index < nb_channel);
+	} else {
+		memcpy(&word, &msg->header, sizeof(uint32_t));
+		res = imx_mu_plat_send(base, start_index, word);
+		if (res)
+			return res;
+
+		start_index++;
+		nb_channel = imx_mu_plat_get_tx_channel(base);
+		end_index = msg->header.size;
+	}
+
+	for (count = start_index; count < end_index; count++) {
 		res = imx_mu_plat_send(base, count % nb_channel,
-				       msg->data.u32[count - 1]);
+				       msg->data.u32[count - start_index]);
 		if (res)
 			return res;
 	}
diff --git a/core/drivers/imx/mu/sub.mk b/core/drivers/imx/mu/sub.mk
index c64f5fed8..23403088f 100644
--- a/core/drivers/imx/mu/sub.mk
+++ b/core/drivers/imx/mu/sub.mk
@@ -1,5 +1,5 @@
 srcs-y += imx_mu.c
-srcs-$(call cfg-one-enabled,CFG_MX8ULP CFG_MX93 CFG_MX91) += imx_mu_8ulp.c
+srcs-$(call cfg-one-enabled,CFG_MX8ULP CFG_MX93 CFG_MX91 CFG_MX95) += imx_mu_8ulp.c
 ifeq ($(filter y, $(CFG_MX8QM) $(CFG_MX8QX) $(CFG_MX8DXL)),y)
 srcs-y += imx_mu_8q.c
 endif
diff --git a/core/drivers/imx_csu.c b/core/drivers/imx_csu.c
index fc06697ef..54463c3ce 100644
--- a/core/drivers/imx_csu.c
+++ b/core/drivers/imx_csu.c
@@ -44,13 +44,12 @@ const struct csu_setting csu_setting_imx6ul[] = {
 const struct csu_setting csu_setting_imx6ull[] = {
 	{ 13, 0xFF0033 },	/* Protect ROMCP */
 	{ 16, 0x3300FF },	/* Protect TZASC */
-	{ 34, 0xFF0033 },	/* Protect DCP */
 	{ 39, 0x3300FF },	/* Protect OCRAM */
 	{ (-1), 0 },
 };
 
 const struct csu_setting csu_setting_imx6sl[] = {
-	{ 13, 0x3F0033 },	/* Protect DCP/ROMCP */
+	{ 13, 0xFF0033 },	/* Protect ROMCP */
 	{ 16, 0xFF0033 },	/* Protect TZASC */
 	{ 26, 0xFF0033 },	/* Protect OCRAM */
 	{ (-1), 0 },
diff --git a/core/drivers/imx_ele.c b/core/drivers/imx_ele.c
deleted file mode 100644
index 2ea1aff1f..000000000
--- a/core/drivers/imx_ele.c
+++ /dev/null
@@ -1,559 +0,0 @@
-// SPDX-License-Identifier: BSD-2-Clause
-/*
- * Copyright 2022-2023 NXP
- */
-#include <drivers/imx_mu.h>
-#include <initcall.h>
-#include <kernel/boot.h>
-#include <kernel/delay.h>
-#include <kernel/panic.h>
-#include <kernel/tee_common_otp.h>
-#include <mm/core_memprot.h>
-#include <mm/core_mmu.h>
-#include <stdint.h>
-#include <tee/cache.h>
-#include <tee_api_defines.h>
-#include <trace.h>
-#include <types_ext.h>
-#include <utee_types.h>
-#include <util.h>
-
-#define ELE_BASE_ADDR MU_BASE
-#define ELE_BASE_SIZE MU_SIZE
-
-#define ELE_VERSION_BASELINE 0x06
-#define ELE_VERSION_HSM	     0x07
-#define ELE_COMMAND_SUCCEED  0xd6
-#define ELE_REQUEST_TAG	     0x17
-#define ELE_RESPONSE_TAG     0xe1
-
-#define ELE_CMD_SESSION_OPEN	    0x10
-#define ELE_CMD_SESSION_CLOSE	    0x11
-#define ELE_CMD_SESSION_DEVICE_INFO 0x16
-#define ELE_CMD_RNG_GET		    0xCD
-#define ELE_CMD_TRNG_STATE	    0xA4
-#define ELE_CMD_GET_INFO	    0xDA
-#define ELE_CMD_DERIVE_KEY	    0xA9
-
-#define IMX_ELE_TRNG_STATUS_READY 0x3
-
-#define ELE_MU_ID  0x2
-#define ELE_MU_IRQ 0x0
-
-#if defined(CFG_MX8ULP)
-#define ELE_MU_DID 0x7
-#define CACHELINE_SIZE 64
-#elif defined(CFG_MX93) || defined(CFG_MX91)
-#define ELE_MU_DID 0x3
-#define CACHELINE_SIZE 64
-#else
-#error "Platform DID is not defined"
-#endif
-
-#define SIZE_MSG_32(_msg) size_msg_32(sizeof(_msg))
-
-register_phys_mem_pgdir(MEM_AREA_IO_SEC, ELE_BASE_ADDR, ELE_BASE_SIZE);
-
-struct get_info_msg_rsp {
-	uint32_t rsp_code;
-	uint16_t soc_id;
-	uint16_t soc_rev;
-	uint16_t lifecycle;
-	uint16_t sssm_state;
-	uint32_t uid[4];
-	uint32_t sha256_rom_patch[8];
-	uint32_t sha256_fw[8];
-} __packed;
-
-struct session_get_device_info_rsp {
-	uint32_t rsp_code;
-	uint32_t user_sab_id;
-	uint32_t chip_uid[4];
-	uint16_t chip_life_cycle;
-	uint16_t chip_monotonic_counter;
-	uint32_t ele_version;
-	uint32_t ele_version_ext;
-	uint8_t fips_mode;
-	uint8_t reserved[3];
-	uint32_t crc;
-} __packed;
-
-struct response_code {
-	uint8_t status;
-	uint8_t rating;
-	uint16_t rating_extension;
-} __packed;
-
-/*
- * Print ELE response status and rating
- *
- * @rsp response code structure
- */
-static void print_rsp_code(struct response_code rsp __maybe_unused)
-{
-	DMSG("Response status %#"PRIx8", rating %#"PRIx8" (ext %#"PRIx16")",
-	     rsp.status, rsp.rating, rsp.rating_extension);
-}
-
-/*
- * Print ELE message header
- *
- * @hdr message header
- */
-static void print_msg_header(struct imx_mu_msg_header hdr __maybe_unused)
-{
-	DMSG("Header ver %#"PRIx8", size %"PRId8", tag %#"PRIx8", cmd %#"PRIx8,
-	     hdr.version, hdr.size, hdr.tag, hdr.command);
-}
-
-/*
- * Print full ELE message content
- *
- * @msg message
- */
-static void dump_message(const struct imx_mu_msg *msg __maybe_unused)
-{
-	size_t i = 0;
-	size_t size __maybe_unused = msg->header.size;
-	uint32_t *data __maybe_unused = (uint32_t *)msg;
-
-	DMSG("Dump of message %p(%zu)", data, size);
-	for (i = 0; i < size; i++)
-		DMSG("word %zu: %#"PRIx32, i, data[i]);
-}
-
-/*
- * Return the number of 32 bits words of the given message.
- *
- * @cmd command size in byte
- */
-static size_t size_msg_32(size_t cmd)
-{
-	/* Roundup and add header size */
-	return ROUNDUP_DIV(cmd, sizeof(uint32_t)) + 1;
-}
-
-/*
- * The CRC for the message is computed xor-ing all the words of the message:
- * the header and all the words except the word storing the CRC.
- *
- * @msg MU message to hash
- */
-static uint32_t compute_crc(const struct imx_mu_msg *msg)
-{
-	uint32_t crc = 0;
-	uint8_t i = 0;
-	uint32_t *payload = (uint32_t *)msg;
-
-	assert(msg);
-
-	for (i = 0; i < msg->header.size - 1; i++)
-		crc ^= payload[i];
-
-	return crc;
-}
-
-/*
- * Compute message CRC and update CRC in message header.
- *
- * @msg MU message to hash
- */
-static void update_crc(struct imx_mu_msg *msg)
-{
-	assert(msg);
-	/*
-	 * The CRC field is the last element of array. The size of the header
-	 * is also subtracted from CRC computation.
-	 */
-	msg->data.u32[msg->header.size - 2] = compute_crc(msg);
-}
-
-/*
- * Return the given MU base address, depending on the MMU state.
- *
- * @pa MU physical base address
- * @sz MU size
- */
-static vaddr_t imx_ele_init(paddr_t pa, size_t sz)
-{
-	static bool is_initialized;
-	vaddr_t va = 0;
-
-	assert(pa && sz);
-
-	if (cpu_mmu_enabled())
-		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
-	else
-		va = (vaddr_t)pa;
-
-	if (!is_initialized) {
-		imx_mu_init(va);
-		is_initialized = true;
-	}
-
-	return va;
-}
-
-/*
- * Extract response codes from the given word
- *
- * @word 32 bits word MU response
- */
-static struct response_code get_response_code(uint32_t word)
-{
-	struct response_code rsp = {
-		.rating_extension = (word & GENMASK_32(31, 16)) >> 16,
-		.rating = (word & GENMASK_32(15, 8)) >> 8,
-		.status = (word & GENMASK_32(7, 0)) >> 0,
-	};
-
-	return rsp;
-}
-
-/*
- * Initiate a communication with the EdgeLock Enclave. It sends a message
- * and expects an answer.
- *
- * @msg MU message
- */
-static TEE_Result imx_ele_call(struct imx_mu_msg *msg)
-{
-	TEE_Result res = TEE_ERROR_GENERIC;
-	struct response_code rsp = { };
-	vaddr_t va = 0;
-
-	assert(msg);
-
-	if (msg->header.tag != ELE_REQUEST_TAG) {
-		EMSG("Request has invalid tag: %#"PRIx8" instead of %#"PRIx8,
-		     msg->header.tag, ELE_REQUEST_TAG);
-		return TEE_ERROR_BAD_PARAMETERS;
-	}
-
-	va = imx_ele_init(ELE_BASE_ADDR, ELE_BASE_SIZE);
-	if (!va) {
-		EMSG("Fail to get base address");
-		return TEE_ERROR_GENERIC;
-	}
-
-	res = imx_mu_call(va, msg, true);
-	if (res) {
-		EMSG("Failed to transmit message: %#"PRIx32, res);
-		print_msg_header(msg->header);
-		dump_message(msg);
-		return res;
-	}
-
-	rsp = get_response_code(msg->data.u32[0]);
-
-	if (msg->header.tag != ELE_RESPONSE_TAG) {
-		EMSG("Response has invalid tag: %#"PRIx8" instead of %#"PRIx8,
-		     msg->header.tag, ELE_RESPONSE_TAG);
-		print_msg_header(msg->header);
-		return TEE_ERROR_GENERIC;
-	}
-
-	if (rsp.status != ELE_COMMAND_SUCCEED) {
-		EMSG("Command has failed");
-		print_rsp_code(rsp);
-		return TEE_ERROR_GENERIC;
-	}
-
-	/* The rating can be different in success and failing cases */
-	if (rsp.rating != 0) {
-		EMSG("Command has invalid rating: %#"PRIx8, rsp.rating);
-		print_rsp_code(rsp);
-		return TEE_ERROR_GENERIC;
-	}
-
-	return TEE_SUCCESS;
-}
-
-/*
- * Get device information from EdgeLock Enclave
- *
- * @session_handle EdgeLock Enclave session handle
- * @rsp Device info
- */
-static TEE_Result
-imx_ele_session_get_device_info(uint32_t session_handle,
-				struct session_get_device_info_rsp *rsp)
-{
-	TEE_Result res = TEE_ERROR_GENERIC;
-	struct session_get_device_info_cmd {
-		uint32_t session_handle;
-	} cmd = {
-		.session_handle = session_handle,
-	};
-	struct imx_mu_msg msg = {
-		.header.version = ELE_VERSION_HSM,
-		.header.size = SIZE_MSG_32(cmd),
-		.header.tag = ELE_REQUEST_TAG,
-		.header.command = ELE_CMD_SESSION_DEVICE_INFO,
-	};
-
-	assert(rsp);
-
-	memcpy(msg.data.u8, &cmd, sizeof(cmd));
-
-	res = imx_ele_call(&msg);
-	if (res)
-		return res;
-
-	memcpy(rsp, msg.data.u32, sizeof(*rsp));
-
-	if (compute_crc(&msg) != rsp->crc)
-		return TEE_ERROR_CORRUPT_OBJECT;
-
-	return TEE_SUCCESS;
-}
-
-/*
- * Open a session with EdgeLock Enclave. It returns a session handle.
- *
- * @session_handle EdgeLock Enclave session handle
- */
-static TEE_Result imx_ele_session_open(uint32_t *session_handle)
-{
-	TEE_Result res = TEE_ERROR_GENERIC;
-	struct open_session_cmd {
-		uint8_t mu_id;
-		uint8_t interrupt_num;
-		uint8_t tz;
-		uint8_t did;
-		uint8_t priority;
-		uint8_t op_mode;
-		uint16_t reserved;
-	} __packed cmd = {
-		.mu_id = ELE_MU_ID,
-		.interrupt_num = ELE_MU_IRQ,
-		.tz = 0,
-		.did = ELE_MU_DID,
-		.priority = 0,
-		.op_mode = 0,
-		.reserved = 0,
-	};
-	struct open_session_rsp {
-		uint32_t rsp_code;
-		uint32_t session_handle;
-	} rsp = { };
-	struct imx_mu_msg msg = {
-		.header.version = ELE_VERSION_HSM,
-		.header.size = SIZE_MSG_32(cmd),
-		.header.tag = ELE_REQUEST_TAG,
-		.header.command = ELE_CMD_SESSION_OPEN,
-	};
-
-	assert(session_handle);
-
-	memcpy(msg.data.u8, &cmd, sizeof(cmd));
-
-	res = imx_ele_call(&msg);
-	if (res)
-		return res;
-
-	memcpy(&rsp, msg.data.u8, sizeof(rsp));
-
-	*session_handle = rsp.session_handle;
-
-	return TEE_SUCCESS;
-}
-
-/*
- * Close a session with EdgeLock Enclave.
- *
- * @session_handle EdgeLock Enclave session handle
- */
-static TEE_Result imx_ele_session_close(uint32_t session_handle)
-{
-	struct close_session_cmd {
-		uint32_t session_handle;
-	} cmd = {
-		.session_handle = session_handle,
-	};
-	struct imx_mu_msg msg = {
-		.header.version = ELE_VERSION_HSM,
-		.header.size = SIZE_MSG_32(cmd),
-		.header.tag = ELE_REQUEST_TAG,
-		.header.command = ELE_CMD_SESSION_CLOSE,
-	};
-
-	memcpy(msg.data.u8, &cmd, sizeof(cmd));
-
-	return imx_ele_call(&msg);
-}
-
-/*
- * Get the current state of the ELE TRNG
- */
-static TEE_Result imx_ele_rng_get_trng_state(void)
-{
-	TEE_Result res = TEE_ERROR_GENERIC;
-	struct rng_get_trng_state_msg_rsp {
-		uint32_t rsp_code;
-		uint8_t trng_state;
-		uint8_t csal_state;
-	} __packed rsp = { };
-	struct imx_mu_msg msg = {
-		.header.version = ELE_VERSION_BASELINE,
-		.header.size = 1,
-		.header.tag = ELE_REQUEST_TAG,
-		.header.command = ELE_CMD_TRNG_STATE,
-	};
-
-	res = imx_ele_call(&msg);
-	if (res)
-		return res;
-
-	memcpy(&rsp, msg.data.u8, sizeof(rsp));
-
-	if (rsp.trng_state != IMX_ELE_TRNG_STATUS_READY)
-		return TEE_ERROR_BUSY;
-	else
-		return TEE_SUCCESS;
-}
-
-unsigned long plat_get_aslr_seed(void)
-{
-	TEE_Result res = TEE_ERROR_GENERIC;
-	uint64_t timeout = timeout_init_us(10 * 1000);
-	struct rng_get_random_cmd {
-		uint32_t addr_msb;
-		uint32_t addr_lsb;
-		uint32_t size;
-		uint32_t crc;
-	} cmd = { };
-	struct imx_mu_msg msg = {
-		.header.version = ELE_VERSION_HSM,
-		.header.size = SIZE_MSG_32(cmd),
-		.header.tag = ELE_REQUEST_TAG,
-		.header.command = ELE_CMD_RNG_GET,
-	};
-	unsigned long aslr __aligned(CACHELINE_SIZE) = 0;
-
-	/*
-	 * This function can only be called when the MMU is off. No
-	 * virtual/physical address translation is performed, nor cache
-	 * maintenance.
-	 */
-	assert(!cpu_mmu_enabled());
-
-	reg_pair_from_64((uint64_t)&aslr, &cmd.addr_msb, &cmd.addr_lsb);
-	cmd.size = sizeof(aslr);
-
-	/*
-	 * Check the current TRNG state of the ELE. The TRNG must be
-	 * started with a command earlier in the boot to allow the TRNG
-	 * to generate enough entropy.
-	 */
-	while (imx_ele_rng_get_trng_state() == TEE_ERROR_BUSY)
-		if (timeout_elapsed(timeout))
-			panic("ELE RNG is busy");
-
-	memcpy(msg.data.u8, &cmd, sizeof(cmd));
-	update_crc(&msg);
-
-	res = imx_ele_call(&msg);
-	if (res)
-		panic("Cannot retrieve random data from ELE");
-
-	return aslr;
-}
-
-int tee_otp_get_die_id(uint8_t *buffer, size_t len)
-{
-	uint32_t session_handle = 0;
-	/*
-	 * The die ID must be cached because some board configuration prevents
-	 * the MU to be used by OPTEE at runtime.
-	 */
-	static struct session_get_device_info_rsp rsp;
-
-	if (rsp.rsp_code)
-		goto out;
-
-	if (imx_ele_session_open(&session_handle))
-		goto err;
-
-	if (imx_ele_session_get_device_info(session_handle, &rsp))
-		goto err;
-
-	if (imx_ele_session_close(session_handle))
-		goto err;
-
-out:
-	/*
-	 * In the device info array return by the ELE, the words 2, 3, 4 and 5
-	 * are the device UID.
-	 */
-	memcpy(buffer, rsp.chip_uid, MIN(sizeof(rsp.chip_uid), len));
-
-	return 0;
-err:
-	panic("Fail to get the device UID");
-}
-
-#if defined(CFG_MX93) || defined(CFG_MX91)
-TEE_Result tee_otp_get_hw_unique_key(struct tee_hw_unique_key *hwkey)
-{
-	TEE_Result res = TEE_ERROR_GENERIC;
-	const char pattern[16] __aligned(CACHELINE_SIZE) = "TEE_for_HUK_ELE";
-	static uint8_t key[CACHELINE_SIZE] __aligned(CACHELINE_SIZE);
-	static bool is_fetched;
-	uint32_t msb = 0;
-	uint32_t lsb = 0;
-	struct key_derive_cmd {
-		uint32_t key_addr_msb;
-		uint32_t key_addr_lsb;
-		uint32_t ctx_addr_msb;
-		uint32_t ctx_addr_lsb;
-		uint16_t key_size;
-		uint16_t ctx_size;
-		uint32_t crc;
-	} __packed cmd = { };
-	struct imx_mu_msg msg = {
-		.header.version = ELE_VERSION_BASELINE,
-		.header.size = SIZE_MSG_32(cmd),
-		.header.tag = ELE_REQUEST_TAG,
-		.header.command = ELE_CMD_DERIVE_KEY,
-	};
-
-	if (is_fetched)
-		goto out;
-
-	/*
-	 * Intermediate msb and lsb values are needed. Directly using
-	 * key_addr_msb and key_addr_lsb might be unaligned because of the
-	 * __packed attribute of key_derive_cmd {}
-	 */
-	reg_pair_from_64((uint64_t)virt_to_phys(key), &msb, &lsb);
-
-	cmd.key_addr_lsb = lsb;
-	cmd.key_addr_msb = msb;
-	cmd.key_size = HW_UNIQUE_KEY_LENGTH;
-
-	reg_pair_from_64((uint64_t)virt_to_phys((void *)pattern), &msb, &lsb);
-
-	cmd.ctx_addr_lsb = lsb;
-	cmd.ctx_addr_msb = msb;
-	cmd.ctx_size = sizeof(pattern);
-
-	memcpy(msg.data.u8, &cmd, sizeof(cmd));
-	update_crc(&msg);
-
-	cache_operation(TEE_CACHEFLUSH, key, HW_UNIQUE_KEY_LENGTH);
-	cache_operation(TEE_CACHECLEAN, (void *)pattern, sizeof(pattern));
-
-	res = imx_ele_call(&msg);
-	if (res)
-		panic("failed to get the huk");
-
-	cache_operation(TEE_CACHEINVALIDATE, key, HW_UNIQUE_KEY_LENGTH);
-	is_fetched = true;
-out:
-	memcpy(hwkey->data, key,
-	       MIN(sizeof(key), (size_t)HW_UNIQUE_KEY_LENGTH));
-
-	return TEE_SUCCESS;
-}
-#endif /* CFG_MX93 || CFG_MX91 */
diff --git a/core/drivers/imx_scu.c b/core/drivers/imx_scu.c
index 738a60f1d..d81609a16 100644
--- a/core/drivers/imx_scu.c
+++ b/core/drivers/imx_scu.c
@@ -4,10 +4,13 @@
  *
  */
 
+#include <drivers/imx_scu.h>
 #include <imx.h>
 #include <initcall.h>
 #include <io.h>
+#include <kernel/cache_helpers.h>
 #include <kernel/tz_ssvce_def.h>
+#include <kernel/pm.h>
 #include <mm/core_memprot.h>
 
 /* Invalidate all registers */
@@ -17,7 +20,7 @@
 /* Both non-secure CPU access SCU, private and global timer */
 #define SCU_NSAC_CTRL_INIT	0x00000FFF
 
-static TEE_Result scu_init(void)
+TEE_Result scu_init(void)
 {
 	vaddr_t scu_base = core_mmu_get_va(SCU_BASE, MEM_AREA_IO_SEC,
 					   SCU_SIZE);
@@ -35,4 +38,24 @@ static TEE_Result scu_init(void)
 
 	return TEE_SUCCESS;
 }
-driver_init(scu_init);
+
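+/*
+ * On resume from a low power state, re-initialize the SCU and
+ * clean/invalidate the data cache.
+ */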
+static TEE_Result pm_enter_resume(enum pm_op op, uint32_t pm_hint __unused,
+		const struct pm_callback_handle *pm_handle __unused)
+{
+	if (op == PM_OP_RESUME) {
+		scu_init();
+		dcache_op_all(DCACHE_OP_CLEAN_INV);
+	}
+	return TEE_SUCCESS;
+}
+
+static TEE_Result scu_configure(void)
+{
+	scu_init();
+#ifdef CFG_PSCI_ARM32
+	register_pm_driver_cb(pm_enter_resume, NULL, "imx-scu");
+#endif
+	return TEE_SUCCESS;
+}
+
+driver_init(scu_configure);
diff --git a/core/drivers/imx_snvs.c b/core/drivers/imx_snvs.c
index 7f05614c9..eaa8fb25b 100644
--- a/core/drivers/imx_snvs.c
+++ b/core/drivers/imx_snvs.c
@@ -6,6 +6,7 @@
  */
 
 #include <drivers/imx_snvs.h>
+#include <initcall.h>
 #include <io.h>
 #include <mm/core_memprot.h>
 #include <mm/core_mmu.h>
@@ -205,3 +206,17 @@ void imx_snvs_shutdown(void)
 		   SNVS_LPCR_DP_EN_MASK |
 		   SNVS_LPCR_SRTC_ENV_MASK);
 }
+
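+/*
+ * When the device is closed, set the NPSWA_EN (non-privileged software
+ * access enable) bit in the SNVS HPCOMR register.
+ */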
+static TEE_Result snvs_set_npswa_en(void)
+{
+	if (snvs_is_device_closed()) {
+		vaddr_t snvs_base = core_mmu_get_va(SNVS_BASE, MEM_AREA_IO_SEC,
+						    SNVS_SIZE);
+
+		io_mask32(snvs_base + SNVS_HPCOMR, SNVS_HPCOMR_NPSWA_EN,
+			  SNVS_HPCOMR_NPSWA_EN);
+	}
+
+	return TEE_SUCCESS;
+}
+driver_init(snvs_set_npswa_en);
diff --git a/core/drivers/imx_trusted_arm_ce.c b/core/drivers/imx_trusted_arm_ce.c
new file mode 100644
index 000000000..2025ef2d2
--- /dev/null
+++ b/core/drivers/imx_trusted_arm_ce.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+
+#include <drivers/imx_trusted_arm_ce.h>
+#ifdef CFG_WITH_VFP
+#include <kernel/vfp.h>
+#endif
+#include <pta_imx_trusted_arm_ce.h>
+
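+/*
+ * Handle the CBC cipher SMC: enable the VFP, run the cipher operation and
+ * store its result in args->a0.
+ *
+ * @args: SMC arguments, a1 holds the key identifier
+ * @encrypt: True to encrypt, false to decrypt
+ */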
+TEE_Result imx_smc_cipher_cbc(struct thread_smc_args *args, bool encrypt)
+{
+	uint32_t key_id = (uint32_t)args->a1;
+
+	vfp_enable();
+
+	args->a0 = cipher_cbc(key_id, args->a2, args->a3, args->a4, args->a5,
+			      args->a6, encrypt);
+	vfp_disable();
+
+	return TEE_SUCCESS;
+}
+
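+/*
+ * Handle the XTS cipher SMC: enable the VFP, run the cipher operation and
+ * store its result in args->a0.
+ *
+ * @args: SMC arguments, a1 holds the two 32-bit key identifiers
+ * @encrypt: True to encrypt, false to decrypt
+ */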
+TEE_Result imx_smc_cipher_xts(struct thread_smc_args *args, bool encrypt)
+{
+	uint32_t key_id_1 = (uint32_t)(args->a1 & 0xFFFFFFFF);
+	uint32_t key_id_2 = (uint32_t)(args->a1 >> 32);
+
+	vfp_enable();
+
+	args->a0 = cipher_xts(key_id_1, key_id_2, args->a2, args->a3, args->a4,
+			      args->a5, args->a6, encrypt);
+	vfp_disable();
+
+	return TEE_SUCCESS;
+}
diff --git a/core/drivers/pm/imx/busfreq/busfreq.c b/core/drivers/pm/imx/busfreq/busfreq.c
new file mode 100644
index 000000000..0145cfa49
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq.c
+ *
+ * @brief   Implementation of the bus frequency scaling.\n
+ *          Interface and initialization.
+ */
+
+/* Global includes */
+#include <initcall.h>
+#include <kernel/boot.h>
+#include <kernel/dt.h>
+#include <libfdt.h>
+#include <tee_api_types.h>
+#include <trace.h>
+
+/* Platform includes */
+#include <imx.h>
+
+/* Local include */
+#include "busfreq_imx6.h"
+#include "busfreq_imx7.h"
+
+#include <drivers/pm/imx/busfreq.h>
+
+#ifndef CFG_IMX6
+__weak TEE_Result imx6_busfreq_change(uint32_t freq __unused,
+		uint32_t dll_off __unused)
+{
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+
+__weak TEE_Result imx6_busfreq_init(void)
+{
+	return TEE_ERROR_GENERIC;
+}
+#endif
+#ifndef CFG_IMX7
+__weak TEE_Result imx7_busfreq_change(uint32_t freq __unused,
+		uint32_t dll_off __unused)
+{
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+
+__weak TEE_Result imx7_busfreq_init(void)
+{
+	return TEE_ERROR_GENERIC;
+}
+#endif
+
+/**
+ * @brief   Change the bus frequency on any i.MX device
+ *          to the requested frequency \a freq and
+ *          switch the DLL off if \a dll_off = 1
+ *
+ * @param[in] freq     Frequency to switch to
+ * @param[in] dll_off  switch DLL off or not
+ *
+ * @retval  TEE_SUCCESS              No error
+ * @retval  TEE_ERROR_NOT_SUPPORTED  Feature not supported
+ */
+TEE_Result busfreq_change(uint32_t freq, uint32_t dll_off)
+{
+	TEE_Result ret = TEE_ERROR_NOT_SUPPORTED;
+
+	if (soc_is_imx6())
+		ret = imx6_busfreq_change(freq, dll_off);
+	else if (soc_is_imx7ds())
+		ret = imx7_busfreq_change(freq, dll_off);
+
+	return ret;
+}
+
+#ifdef CFG_DT
+/**
+ * @brief   Add the busfreq property into the "/firmware/optee"
+ *          node.
+ *
+ * @retval 0    if success
+ * @retval (-1) otherwise
+ */
+static int dt_busfreq(void)
+{
+	void *fdt = get_dt();
+	int  offs;
+	int  ret;
+
+	offs = fdt_path_offset(fdt, "/firmware/optee");
+
+	if (offs < 0) {
+		EMSG("OP-TEE Device Tree doesn't exist!\n");
+		return (-1);
+	}
+
+	ret = fdt_setprop_u32(fdt, offs, "busfreq", 1);
+	if (ret < 0)
+		return (-1);
+
+	return 0;
+}
+#else
+static int dt_busfreq(void)
+{
+	return 0;
+}
+#endif
+
+/**
+ * @brief   Allocation and initialization of the OCRAM
+ *          into which memory frequency change function
+ *          will be executed
+ *
+ * @retval  TEE_SUCCESS         Success
+ * @retval  TEE_ERROR_GENERIC   Generic Error
+ */
+static TEE_Result busfreq_init(void)
+{
+	TEE_Result ret = TEE_ERROR_GENERIC;
+
+	if (soc_is_imx6())
+		ret = imx6_busfreq_init();
+	else if (soc_is_imx7ds())
+		ret = imx7_busfreq_init();
+
+	if (ret == TEE_SUCCESS) {
+		if (dt_busfreq()) {
+			EMSG("Failed to config Bus Frequency");
+			ret = TEE_ERROR_GENERIC;
+		}
+	}
+
+	return ret;
+}
+
+service_init(busfreq_init);
diff --git a/core/drivers/pm/imx/busfreq/busfreq_asm_imx7.S b/core/drivers/pm/imx/busfreq/busfreq_asm_imx7.S
new file mode 100644
index 000000000..ca994f384
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_asm_imx7.S
@@ -0,0 +1,724 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_asm_imx7.S
+ *
+ * @brief   Implementation of the bus frequency
+ *          assembly function on i.MX7 devices
+ */
+/* Global includes */
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+
+#include <imx-regs.h>
+/* Generated file */
+#include <generated/busfreq_imx7_defines.h>
+
+#define MX7_IOMUX_GPR8	IOMUX_GPRx_OFFSET(8)
+
+#define CCM_TARGET_ROOT49  CCM_TARGET_ROOTx(49)
+#define CCM_TARGET_ROOT65  CCM_TARGET_ROOTx(65)
+
+#define REV_1_1		0x11
+
+	.macro switch_to_533mhz_ddr3_start
+
+	mov	r0, #0x2
+	str	r0, [r9, #MX7_DDRC_DBG1]
+
+	mov	r0, #0x78
+	str	r0, [r11, #MX7_IOMUX_GPR8]
+	orr	r0, r0, #0x100
+	str	r0, [r11, #MX7_IOMUX_GPR8]
+
+	mov	r1, #(0x3 << 28)
+1:
+	ldr	r0, [r9, #MX7_DDRC_DBGCAM]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	1b
+
+	mov	r1, #0x1
+2:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	2b
+
+	ldr	r0, =0x10f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	mov	r0, #0x1
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800010f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r0, #0x20
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	mov	r1, #0x23
+3:
+	ldr	r0, [r9, #MX7_DDRC_STAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	3b
+
+	ldr	r0, =0x03040001
+	str	r0, [r9, #MX7_DDRC_MSTR]
+
+	ldr	r0, =0x40800020
+	str	r0, [r9, #MX7_DDRC_ZQCTL0]
+
+	ldr	r0, =0x10210100
+	str	r0, [r8, #MX7_DDRPHY_PHY_CON1]
+
+	ldr	r0, =0x00040046
+	str	r0, [r9, #MX7_DDRC_RFSHTMG]
+	.endm
+
+	.macro switch_to_533mhz_ddr3_end
+	cmp	r7, #REV_1_1
+	bne	4f
+
+	ldr	r0, =0x40404040
+	str	r0, [r8, #MX7_DDRPHY_CA_WLDSKEW_CON0]
+	ldr	r0, =0x18181818
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON1]
+	ldr	r0, =0x40401818
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON2]
+	b	5f
+4:
+	mov	r0, #0x0
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON1]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON2]
+5:
+	ldr	r0, =0x11000008
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+	ldr	r0, =0x10000008
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+
+	mov	r1, #0x4
+6:
+	ldr	r0, [r8, #MX7_DDRPHY_MDLL_CON1]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	6b
+
+	mov	r0, #0x1
+	str	r0, [r9, #MX7_DDRC_RFSHCTL3]
+	mov	r0, #0x3
+	str	r0, [r9, #MX7_DDRC_RFSHCTL3]
+
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	mov	r1, #0x1
+7:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	7b
+
+	ldr	r0, =0x10f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800010f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r1, #0x1
+8:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	8b
+
+	mov	r0, #0xf0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	ldr	r0, =0x930
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800000f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_RFSHCTL3]
+	mov	r0, #0x2
+	str	r0, [r9, #MX7_DDRC_RFSHCTL3]
+
+	mov	r1, #0x1
+9:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	9b
+
+	mov	r0, #0xf0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	ldr	r0, =0x930
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800000f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r1, #0x1
+10:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	10b
+
+	ldr	r0, =0x20f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	ldr	r0, =0x408
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800020f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r1, #0x1
+11:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	11b
+
+	ldr	r0, =0x10f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	ldr	r0, =0x4
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800010f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	ldr	r0, =0x0
+	str	r0, [r9, #MX7_DDRC_DBG1]
+
+	/* enable auto self-refresh */
+	ldr	r0, [r9, #MX7_DDRC_PWRCTL]
+	orr	r0, r0, #(1 << 0)
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	.endm
+
+	.macro switch_to_533mhz_lpddr_end
+
+	/* LPDDR2 and LPDDR3 have different settings */
+	cmp r6, #IMX_DDR_TYPE_LPDDR2
+	beq	21f
+
+	cmp	r7, #REV_1_1
+	bne	23f
+
+	ldr	r0, =0x08080808
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON1]
+	ldr	r0, =0x0a0a0808
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON2]
+	ldr	r0, =0x0a0a0a0a
+	str	r0, [r8, #MX7_DDRPHY_CA_WLDSKEW_CON0]
+	b	23f
+21:
+	cmp	r7, #REV_1_1
+	bne	22f
+
+	ldr	r0, =0x1c1c1c1c
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON1]
+	ldr	r0, =0x30301c1c
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON2]
+	ldr	r0, =0x30303030
+	str	r0, [r8, #MX7_DDRPHY_CA_WLDSKEW_CON0]
+	b	23f
+22:
+	ldr	r0, =0x08080808
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON1]
+	ldr	r0, =0x0808
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON2]
+23:
+	ldr	r0, =0x11000008
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+	ldr	r0, =0x10000008
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+
+	mov	r1, #0x4
+24:
+	ldr	r0, [r8, #MX7_DDRPHY_MDLL_CON1]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	24b
+
+	.endm
+
+	.macro switch_to_533mhz
+	cmp r6, #IMX_DDR_TYPE_DDR3
+	beq 101f
+
+	/* LPDDR2 or LPDDR3: begin the switch to 533 MHz */
+	ldr	r0, =0x10210100
+	str	r0, [r8, #MX7_DDRPHY_PHY_CON1]
+
+	ldr	r0, =0x00200038
+	str	r0, [r8, #MX7_DDRPHY_RFSHTMG]
+
+	b	102f
+101:
+	switch_to_533mhz_ddr3_start
+
+102:
+	/* dram root set to from dram main, div by 2 */
+	ldr	r0, =0x10000001
+	ldr r1, =CCM_TARGET_ROOT49
+	str	r0, [r10, r1]
+
+	ldr	r0, =0x1010007e
+	str	r0, [r8, #MX7_DDRPHY_MDLL_CON0]
+
+	ldr	r0, =0x10000008
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+
+	ldr	r0, =0x08080808
+	str	r0, [r8, #MX7_DDRPHY_OFFSETR_CON0]
+	str	r0, [r8, #MX7_DDRPHY_OFFSETR_CON1]
+	mov	r0, #0x8
+	str	r0, [r8, #MX7_DDRPHY_OFFSETR_CON2]
+
+	ldr	r0, =0x08080808
+	str	r0, [r8, #MX7_DDRPHY_OFFSETW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_OFFSETW_CON1]
+	mov	r0, #0x8
+	str	r0, [r8, #MX7_DDRPHY_OFFSETW_CON2]
+
+	cmp	r6, #IMX_DDR_TYPE_DDR3
+	beq	103f
+
+	switch_to_533mhz_lpddr_end
+	b	104f
+103:
+	switch_to_533mhz_ddr3_end
+
+104:
+	.endm
+
+
+	.macro switch_below_100mhz_ddr3_start
+
+	mov	r0, #0x2
+	str	r0, [r9, #MX7_DDRC_DBG1]
+
+	mov	r1, #(0x36 << 24)
+31:
+	ldr	r0, [r9, #MX7_DDRC_DBGCAM]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	31b
+
+	mov	r1, #0x1
+32:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	32b
+
+	ldr	r0, =0x10f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800010f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r1, #0x1
+33:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	33b
+
+	ldr	r0, =0x20f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	mov	r0, #0x8
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800020f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r1, #0x1
+34:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	34b
+
+	ldr	r0, =0x10f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	ldr	r0, =0x1
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800010f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r0, #0x20
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	mov	r1, #0x23
+35:
+	ldr	r0, [r9, #MX7_DDRC_STAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	35b
+
+	mov r0, #0x0
+	str	r0, [r9, #MX7_DDRC_SWCTL]
+
+	ldr	r0, =0x03048001
+	str	r0, [r9, #MX7_DDRC_MSTR]
+
+	mov	r0, #0x1
+	str	r0, [r9, #MX7_DDRC_SWCTL]
+
+	mov	r1, #0x1
+36:
+	ldr	r0, [r9, #MX7_DDRC_SWSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	36b
+
+	ldr	r0, =0x10010100
+	str	r0, [r8, #MX7_DDRPHY_PHY_CON1]
+
+	.endm
+
+	.macro switch_below_100mhz_ddr3_end
+
+	ldr	r0, =0x1100007f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+	ldr	r0, =0x1000007f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	mov	r1, #0x1
+37:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	37b
+
+	mov	r0, #0xf0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+	ldr	r0, =0x820
+	str	r0, [r9, #MX7_DDRC_MRCTRL1]
+	ldr	r0, =0x800000f0
+	str	r0, [r9, #MX7_DDRC_MRCTRL0]
+
+	mov	r1, #0x1
+38:
+	ldr	r0, [r9, #MX7_DDRC_MRSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	38b
+
+	ldr	r0, =0x800020
+	str	r0, [r9, #MX7_DDRC_ZQCTL0]
+
+	ldr	r0, =0x0
+	str	r0, [r9, #MX7_DDRC_DBG1]
+
+	/* enable auto self-refresh */
+	ldr	r0, [r9, #MX7_DDRC_PWRCTL]
+	orr	r0, r0, #(1 << 0)
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	.endm
+
+
+	.macro switch_below_100mhz
+
+	cmp	r6, #IMX_DDR_TYPE_DDR3
+	beq	203f
+
+	cmp	r6, #IMX_DDR_TYPE_LPDDR2
+	beq	201f
+
+	/* LPDDR3 */
+	mov	r0, #0x100
+	str	r0, [r8, #MX7_DDRPHY_PHY_CON1]
+	b	202f
+
+201:
+	/* LPDDR2 */
+	ldr	r0, =0x10010100
+	str	r0, [r8, #MX7_DDRPHY_PHY_CON1]
+
+202:
+	ldr	r0, =0x00020038
+	str	r0, [r8, #MX7_DDRPHY_RFSHTMG]
+
+	b	204f
+
+203:
+	switch_below_100mhz_ddr3_start
+
+204:
+	/* Get the Frequency to switch to */
+	ldr	r0, [r12, #BUSFREQ_INFO_FREQ]
+	ldr	r1, =FREQ_24MHZ
+	cmp	r0, r1
+	beq	25f
+
+	ldr	r0, =0x000B000D
+	str	r0, [r9, #MX7_DDRC_RFSHTMG]
+	b	205f
+
+25:
+	ldr	r0, =0x00030004
+	str	r0, [r9, #MX7_DDRC_RFSHTMG]
+
+	/* dram alt sel set to OSC */
+	ldr	r0, =0x10000000
+	ldr	r1, =CCM_TARGET_ROOT65
+	str	r0, [r10, r1]
+	/* dram root set to from dram alt, div by 1 */
+	ldr	r0, =0x11000000
+	ldr	r1, =CCM_TARGET_ROOT49
+	str	r0, [r10, r1]
+	b	206f
+
+205:
+	/* dram alt sel set to pfd0_392m */
+	ldr	r0, =0x15000000
+	ldr	r1, =CCM_TARGET_ROOT65
+	str	r0, [r10, r1]
+	/* dram root set to from dram alt, div by 4 */
+	ldr	r0, =0x11000003
+	ldr	r1, =CCM_TARGET_ROOT49
+	str	r0, [r10, r1]
+206:
+	ldr	r0, =0x202ffd0
+	str	r0, [r8, #MX7_DDRPHY_MDLL_CON0]
+
+	cmp	r6, #IMX_DDR_TYPE_DDR3
+	ldreq	r0, =0x1000007f
+	movne	r0, #0x7f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+
+	ldr	r0, =0x7f7f7f7f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETR_CON0]
+	str	r0, [r8, #MX7_DDRPHY_OFFSETR_CON1]
+	mov	r0, #0x7f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETR_CON2]
+
+	ldr	r0, =0x7f7f7f7f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_OFFSETW_CON1]
+	mov	r0, #0x7f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETW_CON2]
+
+	cmp	r7, #REV_1_1
+	bne	207f
+
+	mov	r0, #0x0
+	str	r0, [r8, #MX7_DDRPHY_CA_WLDSKEW_CON0]
+	ldr	r0, =0x60606060
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON1]
+	ldr	r0, =0x00006060
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON2]
+	b	208f
+207:
+	mov	r0, #0x0
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON0]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON1]
+	str	r0, [r8, #MX7_DDRPHY_CA_DSKEW_CON2]
+
+208:
+	cmp	r6, #IMX_DDR_TYPE_DDR3
+	beq	209f
+
+	/* LPDDR3 or LPDDR2: complete the switch below 100 MHz */
+	ldr	r0, =0x100007f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+	mov	r0, #0x7f
+	str	r0, [r8, #MX7_DDRPHY_OFFSETD_CON0]
+
+	b	210f
+
+209:
+	switch_below_100mhz_ddr3_end
+
+210:
+	.endm
+
+	.macro prepare_lpddr
+
+	/* disable port */
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_MP_PCTRL0]
+
+	/* wait port busy done */
+	ldr	r1, =0x10001
+301:
+	ldr	r0, [r9, #MX7_DDRC_MP_PSTAT]
+	and	r0, r0, r1
+	cmp	r0, #0
+	bne	301b
+
+	mov	r0, #0x20
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	mov	r1, #0x23
+302:
+	ldr	r0, [r9, #MX7_DDRC_STAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	302b
+
+	mov	r0, #0x1
+	str	r0, [r9, #MX7_DDRC_DBG1]
+
+	mov	r1, #(0x3 << 28)
+303:
+	ldr	r0, [r9, #MX7_DDRC_DBGCAM]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	303b
+
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_SWCTL]
+
+	str	r0, [r9, #MX7_DDRC_DFIMISC]
+
+	mov	r0, #0x1
+	str	r0, [r9, #MX7_DDRC_SWCTL]
+
+	mov r1, #0x1
+304:
+	ldr	r0, [r9, #MX7_DDRC_SWSTAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	bne	304b
+
+	.endm
+
+	.macro done_lpddr
+
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	mov	r1, #0x3
+311:
+	ldr	r0, [r9, #MX7_DDRC_STAT]
+	and	r0, r0, r1
+	cmp	r0, r1
+	beq	311b
+
+	mov	r0, #0x0
+	str	r0, [r9, #MX7_DDRC_DBG1]
+
+	mov	r0, #0x1
+	str	r0, [r9, #MX7_DDRC_MP_PCTRL0]
+
+	/* enable auto self-refresh */
+	ldr	r0, [r9, #MX7_DDRC_PWRCTL]
+	orr	r0, r0, #(1 << 0)
+	str	r0, [r9, #MX7_DDRC_PWRCTL]
+
+	.endm
+
+/**
+ * @brief   Switch the i.MX7 DDR into self-refresh, change its clock
+ *          and exit self-refresh.
+ *          IRQs must be disabled.
+ *
+ * @param[in/out] busfreq_info  busfreq data
+ */
+.align 3
+FUNC imx7_ddr_freq_change , :
+	push	{r1-r12, lr}
+
+	/* Save the input parameters */
+	push	{r0}
+
+	/* Disable D$ */
+	ldr	r11, =cpu_mmu_disable_dcache
+	mov	lr, pc
+	mov	pc, r11
+
+	/* Disable Prediction */
+	read_sctlr	r2
+	bic	r2, r2, #SCTLR_Z
+	write_sctlr	r2
+	dsb
+	isb
+
+	pop	{r12}
+
+	/* Keep r8 to r12 as is */
+	ldr	r8,  [r12, #BUSFREQ_INFO_DDRC_PHY_BASE]
+	ldr	r9,  [r12, #BUSFREQ_INFO_DDRC_BASE]
+	ldr	r10, [r12, #BUSFREQ_INFO_CCM_BASE]
+	ldr	r11, [r12, #BUSFREQ_INFO_IOMUX_BASE]
+
+	/* Make sure TLB preloaded */
+	ldr	r0, [r8]
+	ldr	r0, [r9]
+	ldr	r0, [r10]
+	ldr	r0, [r11]
+
+	/* Get the device revision - keep it in r7 */
+	ldrh	r7, [r12, #BUSFREQ_INFO_REV]
+
+	/* Get the DDR Type in r6 - keep it in r6 */
+	ldrh	r6, [r12, #BUSFREQ_INFO_DDR_TYPE]
+	cmp	r6, #IMX_DDR_TYPE_DDR3
+	beq	bypass_prepare_lpddr
+
+	prepare_lpddr
+
+bypass_prepare_lpddr:
+	/* Get the Bus Freq to switch on */
+	ldr	r0, [r12, #BUSFREQ_INFO_FREQ]
+	ldr	r1, =FREQ_100MHZ
+	cmp	r0, r1
+	bgt	set_freq_to_533mhz
+
+	switch_below_100mhz
+	b	setup_lpddr_done
+
+set_freq_to_533mhz:
+	switch_to_533mhz
+
+setup_lpddr_done:
+	cmp	r6, #IMX_DDR_TYPE_DDR3
+	beq	bypass_done_lpddr
+
+	/* Complete LPDDR switch */
+	done_lpddr
+
+bypass_done_lpddr:
+
+	/* Enable D$ and Prediction */
+	read_sctlr	r2
+	mov	r3, #SCTLR_C
+	add	r3, r3, #SCTLR_Z
+	orr	r2, r2, r3
+	write_sctlr	r2
+	isb
+
+	/* restore registers */
+	pop	{r1-r12, lr}
+	mov	pc, lr
+
+	/* Ensure that all constants will be stored here */
+	.ltorg
+
+	.equ	FREQ_100MHZ, 100000000
+	.equ	FREQ_24MHZ,  24000000
+
+END_FUNC imx7_ddr_freq_change
+
+FUNC get_imx7_ddr_freq_change_size , :
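+	/*
+	 * Size in bytes of imx7_ddr_freq_change: the address of this
+	 * instruction (PC minus the 8-byte prefetch offset) minus the
+	 * start address of imx7_ddr_freq_change.
+	 */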
+	subs	r0, pc, #8
+	ldr	r1, =imx7_ddr_freq_change
+	sub	r0, r0, r1
+	bx	lr
+END_FUNC get_imx7_ddr_freq_change_size
diff --git a/core/drivers/pm/imx/busfreq/busfreq_ddr3_imx6.S b/core/drivers/pm/imx/busfreq/busfreq_ddr3_imx6.S
new file mode 100644
index 000000000..82bb7a555
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_ddr3_imx6.S
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_ddr3_imx6.S
+ *
+ * @brief   Implementation of the bus frequency DDR3
+ *          assembly function on i.MX6 devices
+ */
+/* Global includes */
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+
+/* Generated file */
+#include <generated/busfreq_imx6_defines.h>
+
+/* Local includes */
+#include "busfreq_imx6.h"
+
+#include "busfreq_macro_imx6.S"
+
+	.align	3
+/**
+ * @brief   Put the i.MX6 DDR3 in self-refresh, change its clock,
+ *          then exit self-refresh.
+ *          IRQs must be disabled.
+ *
+ * @param[in/out] busfreq_info  busfreq data
+ */
+
+FUNC imx6_ddr3_freq_change, :
+	push	{r1-r12, lr}
+
+	/* Save the input parameters */
+	push	{r0}
+
+	ldr		r11, =cpu_mmu_disable_dcache
+	mov		lr, pc
+	mov		pc, r11
+
+	pop		{r12}
+
+#ifdef CFG_PL310
+	/* Disable the L2$ */
+	ldr		r0, [r12, #BUSFREQ_INFO_PL310_V_BASE]
+	arm_cl2_disable
+#endif
+
+bypass_l2_disable:
+	tlb_set_to_ocram r12
+
+	/* Keep r9, r10, r11 and r12 as is */
+	ldr	r9,  [r12, #BUSFREQ_INFO_MMDC_BASE]
+	ldr	r10, [r12, #BUSFREQ_INFO_CCM_BASE]
+	ldr	r11, [r12, #BUSFREQ_INFO_IOMUX_BASE]
+
+	/* Make sure TLB preloaded */
+	ldr r0, [r10]
+	ldr r0, [r11]
+
+	is_mx6ul_or_sx
+	moveq	r8, #0
+	beq	1f
+
+	/* r8 = MMDC Port 2 */
+	ldr r8, =MX6_MMDC2_OFFSET
+	add r8, r8, r9
+
+1:
+	/* Read the Original MU delay value */
+	/* Value must be read before doing any MMDC configuration changes */
+	ldr	r0, [r9, #MX6_MMDC_MPMUR0]
+	mov	r7, r0, lsr #BP_MX6_MMDC_MPMUR0_MU_UNIT_DEL_NUM
+	ldr	r0, =0x3FF
+	and	r7, r7, r0
+
+	/* disable automatic power saving. */
+	ldr	r0, [r9, #MX6_MMDC_MAPSR]
+	orr	r0, r0, #0x1
+	str	r0, [r9, #MX6_MMDC_MAPSR]
+
+	/* disable MMDC power down timer. */
+	ldr	r0, [r9, #MX6_MMDC_MDPDC]
+	bic	r0, r0, #(0xFF << 8)
+	str	r0, [r9, #MX6_MMDC_MDPDC]
+
+	do_wait #4
+
+	/* set CON_REG */
+	con_req_ack_set
+
+	/* Load the requested frequency */
+	ldr r4, [r12, #BUSFREQ_INFO_FREQ]
+
+	/* Check if we are on imx6ul or 6sx */
+	is_mx6ul_or_sx
+	beq	2f
+
+	/*
+	 * if the requested frequency is greater than
+	 * 300MHz, skip setting bypass adopt mode.
+	 */
+	ldr r0, =FREQ_300MHZ
+	cmp	r4, r0
+	bge	1f
+
+	is_mx6qp
+	bne	1f
+	/* Switch to adopt mode, set MMDC0_MAARCR bit25~26 to 2b'01 */
+	ldr	r0, [r9, #MX6_MMDC_MAARCR]
+	bic	r0, r0, #(0x3 << 25)
+	orr	r0, r0, #(0x1 << 25)
+	str	r0, [r9, #MX6_MMDC_MAARCR]
+1:
+	ldr	r0, =0x00008050
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+	ldr	r0, =0x00008058
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+2:
+	/*
+	 * if requested frequency is greater than
+	 * 300MHz go to DLL on mode.
+	 */
+	ldr	r0, =FREQ_300MHZ
+	cmp	r4, r0
+	bge	dll_on_mode
+
+dll_off_mode:
+	ldr r0, [r12, #BUSFREQ_INFO_DLL_OFF]
+	/* if DLL is currently on, turn it off. */
+	cmp	r0, #1
+	beq	continue_dll_off_1
+
+	ldr	r0, =0x00018031
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldr	r0, =0x00018039
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	do_wait #10
+
+continue_dll_off_1:
+	/* set DVFS - enter self refresh mode */
+	ldr	r0, [r9, #MX6_MMDC_MAPSR]
+	orr	r0, r0, #(1 << 21)
+	str	r0, [r9, #MX6_MMDC_MAPSR]
+
+	/* de-assert con_req */
+	mov	r0, #0x0
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	dvfs_ack_set
+
+	ldr	r0, =FREQ_24MHZ
+	cmp	r4, r0
+	beq	switch_freq_24
+
+	switch_to_50MHz
+	b	continue_dll_off_2
+
+switch_freq_24:
+	switch_to_24MHz
+
+continue_dll_off_2:
+	/* set SBS - block ddr accesses */
+	ldr	r0, [r9, #MX6_MMDC_MADPCR0]
+	orr	r0, r0, #(1 << 8)
+	str	r0, [r9, #MX6_MMDC_MADPCR0]
+
+	/* clear DVFS - exit from self refresh mode */
+	exit_dvfs
+
+	/* if DLL was previously on, continue DLL off routine. */
+	ldr r0, [r12, #BUSFREQ_INFO_DLL_OFF]
+	cmp r0, #1
+	beq continue_dll_off_3
+
+	is_mx6ul_or_sx
+	ldr	r0, =0x00018031
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldr	r0, =0x00018039
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldreq	r0, =0x04208030
+	ldrne	r0, =0x08208030
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldreq	r0, =0x04208038
+	ldrne	r0, =0x08208038
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldr	r0, =0x00088032
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldr	r0, =0x0008803A
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	/* delay for a while. */
+	do_wait #4
+
+	ldr	r0, [r9, #MX6_MMDC_MDCFG0]
+	bic	r0, r0, #0xf
+	orr	r0, r0, #0x3
+	str	r0, [r9, #MX6_MMDC_MDCFG0]
+
+	ldr	r0, [r9, #MX6_MMDC_MDCFG1]
+	bic	r0, r0, #0x7
+	orr	r0, r0, #0x4
+	str	r0, [r9, #MX6_MMDC_MDCFG1]
+
+	ldr	r0, [r9, #MX6_MMDC_MDMISC]
+	bic	r0, r0, #(0x3 << 16) // set walat = 0x1
+	orr r0, r0, #(0x1 << 16)
+	bic	r0, r0, #(0x7 << 6)  // set ralat = 0x2
+	orr r0, r0, #(0x2 << 6)
+	str	r0, [r9, #MX6_MMDC_MDMISC]
+
+	/* enable dqs pull down in the IOMUX. */
+	ldr r1, [r12, #BUSFREQ_INFO_IOMUX_OFFSETS]
+	ldr r4, [r12, #BUSFREQ_INFO_IOMUX_OFFSETS_SIZE]
+	ldr	r2, =0x3028
+update_iomux:
+	ldr	r0, [r1], #8
+	ldr	r3, [r11, r0]
+	orr r3, r3, r2
+	str	r3, [r11, r0]
+	sub	r4, r4, #1
+	cmp	r4, #0
+	bgt	update_iomux
+
+	/*	ODT disabled. */
+	mov	r0, #0
+	ldr	r1, =MX6_MMDC_MPODTCTRL
+	str	r0, [r9, r1]
+	/* is channel 2 */
+	cmp r8, #0
+	strne	r0, [r8, r1]
+
+	/* DQS gating disabled. */
+	ldr	r1, =MX6_MMDC_MPDGCTRL0
+	ldr	r0, [r9, r1]
+	orr	r0, r0, #(1 << 29)
+	str	r0, [r9, r1]
+
+	/* is channel 2 */
+	ldrne	r0, [r8, r1]
+	orrne	r0, r0, #(0x1 << 29)
+	strne	r0, [r8, r1]
+
+	/* Add workaround for ERR005778.*/
+	/* double the original MU_UNIT_DEL_NUM. */
+	lsl	r7, r7, #1
+
+	/* Bypass the automatic MU by setting the mu_byp_en */
+	ldr r1, =MX6_MMDC_MPMUR0
+	ldr	r0, [r9, r1]
+	orr	r0, r0, #0x400
+	orr	r0, r0, r7
+	str	r0, [r9, r1]
+	/* is channel 2 */
+	strne	r0, [r8, r1]
+
+	/* Now perform a force measure */
+	ldr	r0, [r9, r1]
+	orr	r0, r0, #0x800
+	str	r0, [r9, r1]
+	/* is channel 2 */
+	strne	r0, [r8, r1]
+
+	/* Wait for FRC_MSR to clear. */
+1:
+	ldr	r0, [r9, r1]
+	and	r0, r0, #0x800
+	/* is channel 2 */
+	cmp r8, #0
+	ldrne	r3, [r8, r1]
+	andne	r3, r3, #0x800
+	orrne	r0, r0, r3
+	cmp	r0, #0x0
+	bne	1b
+
+continue_dll_off_3:
+	/* clear SBS - unblock accesses to DDR. */
+	ldr	r0, [r9, #MX6_MMDC_MADPCR0]
+	bic	r0, r0, #(0x1 << 8)
+	str	r0, [r9, #MX6_MMDC_MADPCR0]
+
+	con_req_ack_clr
+
+	b	done
+
+dll_on_mode:
+	/* assert DVFS - enter self refresh mode. */
+	ldr	r0, [r9, #MX6_MMDC_MAPSR]
+	orr	r0, r0, #(1 << 21)
+	str	r0, [r9, #MX6_MMDC_MAPSR]
+
+	/* de-assert CON_REQ. */
+	mov	r0, #0x0
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	/* poll DVFS ack. */
+	dvfs_ack_set
+
+	/* Load the requested frequency */
+	ldr r4, [r12, #BUSFREQ_INFO_FREQ]
+	ldr	r1, =FREQ_528MHZ
+	cmp	r4, r1
+	beq	switch_freq_528
+
+	switch_to_400MHz
+
+	b	continue_dll_on
+
+switch_freq_528:
+	switch_to_528MHz
+
+continue_dll_on:
+
+	/* set SBS step-by-step mode. */
+	ldr	r0, [r9, #MX6_MMDC_MADPCR0]
+	orr	r0, r0, #(1 << 8)
+	str	r0, [r9, #MX6_MMDC_MADPCR0]
+
+	/* clear DVFS - exit self refresh mode. */
+	exit_dvfs
+
+	/* if DLL is currently off, turn it back on. */
+	ldr r0, [r12, #BUSFREQ_INFO_DLL_OFF]
+	cmp	r0, #0
+	beq	update_calibration_only
+
+	ldr r1, =MX6_MMDC_MPZQHWCTRL
+	ldr	r0, [r9, r1]
+	orr	r0, r0, #0x3
+	str	r0, [r9, r1]
+	/* is channel 2 */
+	cmp	r8, #0
+	strne	r0, [r8, r1]
+
+	/* enable DQS gating. */
+	ldr	r1, =MX6_MMDC_MPDGCTRL0
+	ldr	r0, [r9, r1]
+	bic	r0, r0, #(1 << 29)
+	str	r0, [r9, r1]
+
+	/* is channel 2 */
+	ldrne	r0, [r8, r1]
+	bicne	r0, r0, #(1 << 29)
+	strne	r0, [r8, r1]
+
+	/* force measure. */
+	mov	r0, #0x800
+	ldr r1, =MX6_MMDC_MPMUR0
+	str	r0, [r9, r1]
+	/* is channel 2 */
+	strne	r0, [r8, r1]
+
+	/* Wait for FRC_MSR to clear. */
+1:
+	ldr	r0, [r9, r1]
+	and	r0, r0, #0x800
+	/* is channel 2 */
+	cmp	r8, #0
+	ldrne	r3, [r8, r1]
+	andne	r3, r3, #0x800
+	orrne	r0, r0, r3
+	cmp	r0, #0x0
+	bne	1b
+
+	/* disable dqs pull down in the IOMUX. */
+	ldr r1, [r12, #BUSFREQ_INFO_IOMUX_OFFSETS]
+	ldr r3, [r12, #BUSFREQ_INFO_IOMUX_OFFSETS_SIZE]
+update_iomux1:
+	ldr	r0, [r1], #0x4
+	ldr	r2, [r1], #0x4
+	str	r2, [r11, r0]
+	sub	r3, r3, #1
+	cmp	r3, #0
+	bgt	update_iomux1
+
+	/* config MMDC timings to High MHz. */
+	ldr r3, [r12, #BUSFREQ_INFO_DDR_SETTINGS]
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3], #0x4
+	str	r1, [r9, r0]
+
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3], #0x4
+	str	r1, [r9, r0]
+
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3], #0x4
+	str	r1, [r9, r0]
+
+	is_mx6ul_or_sx
+	/* configure ddr devices to dll on, odt. */
+	ldreq r0, =0x00028031
+	ldrne r0, =0x00048031
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldreq r0, =0x00028039
+	ldrne r0, =0x00048039
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	/* delay for a while. */
+	do_wait #4
+
+	/* reset dll. */
+	is_mx6ul_or_sx
+	ldreq r0, =0x09208030
+	ldrne r0, =0x09408030
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldreq r0, =0x09208038
+	ldrne r0, =0x09408038
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	/* delay for a while. */
+	do_wait #100
+
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3], #0x4
+	str	r1, [r9, r0]
+
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3], #0x4
+	str	r1, [r9, r0]
+
+	ldr	r0, =0x00428031
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldr	r0, =0x00428039
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3], #0x4
+	str	r1, [r9, r0]
+
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3], #0x4
+	str	r1, [r9, r0]
+
+	/* issue a zq command. */
+	ldr	r0, =0x04008040
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	ldr	r0, =0x04008048
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+
+	/* MMDC ODT enable. */
+	ldr	r0, [r3], #0x4
+	ldr	r1, [r3]
+	str	r1, [r9, r0]
+
+	/* is channel 2 */
+	cmp	r8, #0
+	ldrne	r2, =MX6_MMDC_MPODTCTRL
+	strne	r1, [r8, r2]
+
+	/* delay for a while. */
+	do_wait #40
+
+	/* enable MMDC power down timer. */
+	ldr	r0, [r9, #MX6_MMDC_MDPDC]
+	orr	r0, r0, #(0x55 << 8)
+	str	r0, [r9, #MX6_MMDC_MDPDC]
+
+update_calibration_only:
+	/* write the new calibration values. */
+	ldr r3, [r12, #BUSFREQ_INFO_DDR_CALIBRATION]
+	ldr r1, [r12, #BUSFREQ_INFO_DDR_CALIBRATION_SIZE]
+
+update_calib:
+	ldr	r0, [r3], #0x4
+	ldr	r2, [r3], #0x4
+	str	r2, [r9, r0]
+	sub	r1, r1, #1
+	cmp	r1, #0
+	bgt	update_calib
+
+	/* perform a force measurement. */
+	mov r0, #0x800
+	ldr r1, =MX6_MMDC_MPMUR0
+	str	r0, [r9, r1]
+	/* is channel 2 */
+	cmp	r8, #0
+	strne	r0, [r8, r1]
+
+	/* Wait for FRC_MSR to clear. */
+1:
+	ldr	r0, [r9, r1]
+	and	r0, r0, #0x800
+	/* is channel 2 */
+	cmp	r8, #0
+	ldrne	r3, [r8, r1]
+	andne	r3, r3, #0x800
+	orrne	r0, r0, r3
+	cmp	r0, #0x0
+	bne	1b
+
+force_measurement_both_channels_done:
+	/* clear SBS - unblock DDR accesses. */
+	ldr	r0, [r9, #MX6_MMDC_MADPCR0]
+	bic	r0, r0, #(1 << 8)
+	str	r0, [r9, #MX6_MMDC_MADPCR0]
+
+	is_mx6qp
+	bne	3f
+	/*
+	 * Switch back to adopt_bp mode, set MMDC0_MAARCR
+	 * bit25~26 to 2b'10.
+	 */
+	ldr	r0, [r9, #MX6_MMDC_MAARCR]
+	bic	r0, r0, #(0x3 << 25)
+	orr	r0, r0, #(0x2 << 25)
+	str	r0, [r9, #MX6_MMDC_MAARCR]
+3:
+	con_req_ack_clr
+
+done:
+	/* MMDC0_MAPSR adopt power down enable. */
+	ldr	r0, [r9, #MX6_MMDC_MAPSR]
+	bic	r0, r0, #0x1
+	str	r0, [r9, #MX6_MMDC_MAPSR]
+
+	tlb_back_to_ddr r12
+
+#ifdef CFG_PL310
+	/* Enable the L2$ */
+	ldr		r0, [r12, #BUSFREQ_INFO_PL310_V_BASE]
+	ldr		r11, =arm_cl2_enable
+	mov		lr, pc
+	mov		pc, r11
+#endif
+
+bypass_l2_enable:
+	/* Enable D$ */
+	read_sctlr	r2
+	orr	r2, r2, #SCTLR_C
+	write_sctlr	r2
+	isb
+
+	/* restore registers */
+	pop		{r1-r12, lr}
+	mov	pc, lr
+
+	/* Ensure that all constants will be stored here */
+	.ltorg
+
+	.equ	FREQ_528MHZ, 528000000
+	.equ	FREQ_300MHZ, 300000000
+	.equ	FREQ_24MHZ,   24000000
+
+END_FUNC imx6_ddr3_freq_change
+
+FUNC get_imx6_ddr3_freq_change_size , :
+	subs	r0, pc, #8
+	ldr		r1, =imx6_ddr3_freq_change
+	sub		r0, r0, r1
+	bx		lr
+END_FUNC get_imx6_ddr3_freq_change_size
diff --git a/core/drivers/pm/imx/busfreq/busfreq_imx6.c b/core/drivers/pm/imx/busfreq/busfreq_imx6.c
new file mode 100644
index 000000000..119545dcf
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_imx6.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_imx6.c
+ *
+ * @brief   Implementation of the bus frequency scaling.\n
+ *          Interface and initialization.
+ */
+/* Standard includes */
+#include <string.h>
+
+/* Global includes */
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <mm/core_memprot.h>
+#include <tee_api_types.h>
+
+/* Platform includes */
+#include <platform_config.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <io.h>
+
+/* Local include */
+#include "busfreq_imx6.h"
+
+static uint32_t ddr3_dll_mx6sx_ul[][2] = {
+	{MX6_MMDC_MDCFG0,	 0x0},
+	{MX6_MMDC_MDCFG1,	 0x0},
+	{MX6_MMDC_MDMISC,    0x0},
+	{MX6_MMDC_MDSCR,	 0x04008032},
+	{MX6_MMDC_MDSCR,	 0x00048031},
+	{MX6_MMDC_MDSCR,	 0x05208030},
+	{MX6_MMDC_MDSCR,	 0x04008040},
+	{MX6_MMDC_MPODTCTRL, 0x0},
+};
+
+static uint32_t ddr3_calibration_mx6sx_ul[][2] = {
+	{MX6_MMDC_MPDGCTRL0, 0x0},
+	{MX6_MMDC_MPDGCTRL1, 0x0},
+	{MX6_MMDC_MPRDDLCTL, 0x0},
+	{MX6_MMDC_MPWRDLCTL, 0x0},
+};
+
+static uint32_t iomux_offsets_mx6sx[][2] = {
+	{0x330, 0x0},
+	{0x334, 0x0},
+	{0x338, 0x0},
+	{0x33c, 0x0},
+};
+
+static uint32_t iomux_offsets_mx6ul[][2] = {
+	{0x280, 0x0},
+	{0x284, 0x0},
+};
+
+static uint32_t ddr3_calibration[][2] = {
+	{MX6_MMDC_MPDGCTRL0,					0x0},
+	{MX6_MMDC_MPDGCTRL1,					0x0},
+	{MX6_MMDC_MPDGCTRL0 + MX6_MMDC2_OFFSET, 0x0},
+	{MX6_MMDC_MPDGCTRL1 + MX6_MMDC2_OFFSET, 0x0},
+	{MX6_MMDC_MPRDDLCTL,					0x0},
+	{MX6_MMDC_MPRDDLCTL + MX6_MMDC2_OFFSET, 0x0},
+	{MX6_MMDC_MPWRDLCTL,					0x0},
+	{MX6_MMDC_MPWRDLCTL + MX6_MMDC2_OFFSET, 0x0},
+};
+
+static uint32_t ddr3_dll_mx6q[][2] = {
+	{MX6_MMDC_MDCFG0,	 0x0},
+	{MX6_MMDC_MDCFG1,	 0x0},
+	{MX6_MMDC_MDMISC,    0x0},
+	{MX6_MMDC_MDSCR,	 0x04088032},
+	{MX6_MMDC_MDSCR,	 0x0408803a},
+	{MX6_MMDC_MDSCR,	 0x08408030},
+	{MX6_MMDC_MDSCR,	 0x08408038},
+	{MX6_MMDC_MPODTCTRL, 0x0},
+};
+
+static uint32_t ddr3_dll_mx6dl[][2] = {
+	{MX6_MMDC_MDCFG0,	 0x0},
+	{MX6_MMDC_MDCFG1,	 0x0},
+	{MX6_MMDC_MDMISC,    0x0},
+	{MX6_MMDC_MDSCR,	 0x04008032},
+	{MX6_MMDC_MDSCR,	 0x0400803a},
+	{MX6_MMDC_MDSCR,	 0x07208030},
+	{MX6_MMDC_MDSCR,	 0x07208038},
+	{MX6_MMDC_MPODTCTRL, 0x0},
+};
+
+static uint32_t iomux_offsets_mx6dl[][2] = {
+	{0x4BC, 0x0},
+	{0x4C0, 0x0},
+	{0x4C4, 0x0},
+	{0x4C8, 0x0},
+	{0x4CC, 0x0},
+	{0x4D0, 0x0},
+	{0x4D4, 0x0},
+	{0x4D8, 0x0},
+};
+
+static uint32_t iomux_offsets_mx6q[][2] = {
+	{0x5A8, 0x0},
+	{0x5B0, 0x0},
+	{0x524, 0x0},
+	{0x51C, 0x0},
+	{0x518, 0x0},
+	{0x50C, 0x0},
+	{0x5B8, 0x0},
+	{0x5C0, 0x0},
+};
+
+/* Local data */
+typedef void (*change_ddr_freq_func)(struct busfreq_info_mx6 *);
+static change_ddr_freq_func change_ddr_freq;
+static struct busfreq_info_mx6 *ddr_info;
+
+/**
+ * @brief   Setup the DDR3 info struct used in the busfreq assembly
+ *          function.
+ */
+static void imx6_ddr3_info_setup(void)
+{
+	uint32_t idx;
+	uint32_t (*table)[2];
+
+	/* Set CPU ID field if needed */
+	ddr_info->cpu_type = 0;
+
+	if (soc_is_imx6ul() || soc_is_imx6ull())
+		ddr_info->cpu_type = BUSFREQ_CPU_MX6UL;
+	else if (soc_is_imx6dqp() || soc_is_imx6dq() || soc_is_imx6sdl())
+		ddr_info->cpu_type = BUSFREQ_CPU_MX6QP;
+	else if (soc_is_imx6sx())
+		ddr_info->cpu_type = BUSFREQ_CPU_MX6SX;
+
+	/* Initialize the registers base address */
+	ddr_info->ccm_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC,
+					     CCM_SIZE);
+	ddr_info->iomux_base = core_mmu_get_va(IOMUXC_BASE, MEM_AREA_IO_SEC,
+					       IOMUXC_SIZE);
+	ddr_info->mmdc_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC,
+					      MMDC_P0_SIZE);
+#ifdef CFG_PL310
+	/* Setup the PL310 virtual base address */
+	ddr_info->pl310_v_base = pl310_base();
+#endif
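+	/*
+	 * The DDR settings, calibration and IOMUX tables are stored in
+	 * OCRAM right after the busfreq_info_mx6 structure, hence the
+	 * pointer arithmetic on ddr_info below.
+	 */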
+	if (soc_is_imx6dq() || soc_is_imx6dqp()) {
+		/* Initialize the ddr_settings pointer and table */
+		ddr_info->ddr_settings = (void *)((uint8_t *)ddr_info +
+					sizeof(struct busfreq_info_mx6));
+		ddr_info->ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6q);
+		memcpy(ddr_info->ddr_settings, ddr3_dll_mx6q,
+				sizeof(ddr3_dll_mx6q));
+
+		/* Initialize the calibration pointer and table */
+		ddr_info->ddr_calibration = (void *)(
+					(uint8_t *)ddr_info->ddr_settings +
+					sizeof(ddr3_dll_mx6q));
+		ddr_info->ddr_calibration_size = ARRAY_SIZE(ddr3_calibration);
+		memcpy(ddr_info->ddr_calibration, ddr3_calibration,
+				sizeof(ddr3_calibration));
+
+		/* Initialize the iomux pointer and table */
+		ddr_info->iomux_offsets = (void *)(
+					(uint8_t *)ddr_info->ddr_calibration +
+					sizeof(ddr3_calibration));
+		ddr_info->iomux_offsets_size = ARRAY_SIZE(iomux_offsets_mx6q);
+		memcpy(ddr_info->iomux_offsets, iomux_offsets_mx6q,
+				sizeof(iomux_offsets_mx6q));
+	} else if (soc_is_imx6sdl()) {
+		/* Initialize the ddr_settings pointer and table */
+		ddr_info->ddr_settings = (void *)((uint8_t *)ddr_info +
+					sizeof(struct busfreq_info_mx6));
+		ddr_info->ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6dl);
+		memcpy(ddr_info->ddr_settings, ddr3_dll_mx6dl,
+				sizeof(ddr3_dll_mx6dl));
+
+		/* Initialize the calibration pointer and table */
+		ddr_info->ddr_calibration = (void *)(
+					(uint8_t *)ddr_info->ddr_settings +
+					sizeof(ddr3_dll_mx6dl));
+		ddr_info->ddr_calibration_size = ARRAY_SIZE(ddr3_calibration);
+		memcpy(ddr_info->ddr_calibration, ddr3_calibration,
+				sizeof(ddr3_calibration));
+
+		/* Initialize the iomux pointer and table */
+		ddr_info->iomux_offsets = (void *)(
+					(uint8_t *)ddr_info->ddr_calibration +
+					sizeof(ddr3_calibration));
+		ddr_info->iomux_offsets_size = ARRAY_SIZE(iomux_offsets_mx6dl);
+		memcpy(ddr_info->iomux_offsets, iomux_offsets_mx6dl,
+				sizeof(iomux_offsets_mx6dl));
+	} else if (soc_is_imx6sx()) {
+		/* Initialize the ddr_settings pointer and table */
+		ddr_info->ddr_settings = (void *)((uint8_t *)ddr_info +
+					sizeof(struct busfreq_info_mx6));
+		ddr_info->ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6sx_ul);
+		memcpy(ddr_info->ddr_settings, ddr3_dll_mx6sx_ul,
+				sizeof(ddr3_dll_mx6sx_ul));
+
+		/* Initialize the calibration pointer and table */
+		ddr_info->ddr_calibration = (void *)(
+					(uint8_t *)ddr_info->ddr_settings +
+					sizeof(ddr3_dll_mx6sx_ul));
+		ddr_info->ddr_calibration_size =
+					ARRAY_SIZE(ddr3_calibration_mx6sx_ul);
+		memcpy(ddr_info->ddr_calibration, ddr3_calibration_mx6sx_ul,
+				sizeof(ddr3_calibration_mx6sx_ul));
+
+		/* Initialize the iomux pointer and table */
+		ddr_info->iomux_offsets = (void *)(
+					(uint8_t *)ddr_info->ddr_calibration +
+					sizeof(ddr3_calibration_mx6sx_ul));
+		ddr_info->iomux_offsets_size = ARRAY_SIZE(iomux_offsets_mx6sx);
+		memcpy(ddr_info->iomux_offsets, iomux_offsets_mx6sx,
+				sizeof(iomux_offsets_mx6sx));
+	} else if (soc_is_imx6ul() || soc_is_imx6ull()) {
+		/* Initialize the ddr_settings pointer and table */
+		ddr_info->ddr_settings = (void *)((uint8_t *)ddr_info +
+					sizeof(struct busfreq_info_mx6));
+		ddr_info->ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6sx_ul);
+		memcpy(ddr_info->ddr_settings, ddr3_dll_mx6sx_ul,
+				sizeof(ddr3_dll_mx6sx_ul));
+
+		/* Initialize the calibration pointer and table */
+		ddr_info->ddr_calibration = (void *)(
+					(uint8_t *)ddr_info->ddr_settings +
+					sizeof(ddr3_dll_mx6sx_ul));
+		ddr_info->ddr_calibration_size =
+					ARRAY_SIZE(ddr3_calibration_mx6sx_ul);
+		memcpy(ddr_info->ddr_calibration, ddr3_calibration_mx6sx_ul,
+				sizeof(ddr3_calibration_mx6sx_ul));
+
+		/* Initialize the iomux pointer and table */
+		ddr_info->iomux_offsets = (void *)(
+					(uint8_t *)ddr_info->ddr_calibration +
+					sizeof(ddr3_calibration_mx6sx_ul));
+		ddr_info->iomux_offsets_size = ARRAY_SIZE(iomux_offsets_mx6ul);
+		memcpy(ddr_info->iomux_offsets, iomux_offsets_mx6ul,
+				sizeof(iomux_offsets_mx6ul));
+	}
+
+	/* Read the IO MUX */
+	table = ddr_info->iomux_offsets;
+	for (idx = 0; idx < ddr_info->iomux_offsets_size; idx++)
+		table[idx][1] = io_read32(ddr_info->iomux_base + table[idx][0]);
+
+	/* Read the DDR Settings */
+	table = ddr_info->ddr_settings;
+	for (idx = 0; idx < ddr_info->ddr_settings_size; idx++)
+		if (table[idx][0] != MX6_MMDC_MDSCR)
+			table[idx][1] = io_read32(ddr_info->mmdc_base +
+						table[idx][0]);
+
+	/* Read the Calibration Settings */
+	table = ddr_info->ddr_calibration;
+	for (idx = 0; idx < ddr_info->ddr_calibration_size; idx++)
+		table[idx][1] = io_read32(ddr_info->mmdc_base + table[idx][0]);
+}
+
+/**
+ * @brief   Install the busfreq assembly function into the OCRAM
+ *          and fill the busfreq data
+ *
+ * @retval  TEE_SUCCESS         Success
+ * @retval  TEE_ERROR_GENERIC   Generic Error
+ */
+TEE_Result imx6_busfreq_init(void)
+{
+	int ddr_type;
+
+	size_t	function_size = 0;
+	size_t	data_size = 0;
+	size_t	page_size;
+
+	uint32_t ocram_start;
+	vaddr_t  tlb_ocram = core_mmu_get_va(
+			imx_get_ocram_tz_start_addr() + IRAM_TBL_OFFSET,
+			MEM_AREA_TEE_COHERENT, 0x40000);
+
+	/*
+	 * If the device is not a 6SX (which uses OCRAM_S), the busfreq
+	 * data/function is simply copied after the suspend and idle code.
+	 * Otherwise, because the TTB1 used to execute the assembly
+	 * idle/suspend/busfreq functions takes 16 KBytes in OCRAM_S
+	 * (16 KBytes max), the code/data and TTB are mixed and care must be
+	 * taken with the TTB entries used. The busfreq code is therefore
+	 * installed at the end of OCRAM_S.
+	 */
+	if (!soc_is_imx6sx())
+		ocram_start = core_mmu_get_va(imx_get_ocram_tz_start_addr() +
+						      BUSFREQ_OCRAM_OFFSET,
+					      MEM_AREA_TEE_COHERENT,
+					      BUSFREQ_MAX_SIZE);
+	else
+		ocram_start = core_mmu_get_va(imx_get_ocram_tz_start_addr(),
+					      MEM_AREA_TEE_COHERENT,
+					      BUSFREQ_MAX_SIZE);
+
+	/* Get the type of memory used */
+	ddr_type = imx_get_ddr_type();
+
+	/*
+	 * Get the BusFreq function to be installed and called
+	 * specific OCRAM allocation for imx6q
+	 */
+	switch (ddr_type) {
+	case IMX_DDR_TYPE_LPDDR2:
+		change_ddr_freq = &imx6_lpddr2_freq_change;
+		function_size   = get_imx6_lpddr2_freq_change_size();
+		data_size       = sizeof(struct busfreq_info_mx6);
+		DMSG("LPDDR2 mem function size=%d, data size=%d",
+			  function_size, data_size);
+		break;
+
+	case IMX_DDR_TYPE_DDR3:
+		change_ddr_freq = &imx6_ddr3_freq_change;
+		function_size   = get_imx6_ddr3_freq_change_size();
+		data_size       = sizeof(struct busfreq_info_mx6);
+		if (soc_is_imx6dq() || soc_is_imx6dqp()) {
+			data_size += sizeof(ddr3_dll_mx6q) +
+					sizeof(ddr3_calibration) +
+					sizeof(iomux_offsets_mx6q);
+		} else if (soc_is_imx6sdl()) {
+			data_size += sizeof(ddr3_dll_mx6dl) +
+					sizeof(ddr3_calibration) +
+					sizeof(iomux_offsets_mx6dl);
+		} else if (soc_is_imx6sx()) {
+			data_size += sizeof(ddr3_dll_mx6sx_ul) +
+					sizeof(ddr3_calibration_mx6sx_ul) +
+					sizeof(iomux_offsets_mx6sx);
+		} else if (soc_is_imx6ul() || soc_is_imx6ull()) {
+			data_size += sizeof(ddr3_dll_mx6sx_ul) +
+					sizeof(ddr3_calibration_mx6sx_ul) +
+					sizeof(iomux_offsets_mx6ul);
+		} else {
+			DMSG("No DDR3 configuration for this device");
+			return TEE_ERROR_GENERIC;
+		}
+
+		DMSG("DDR3 mem function size=%d, data size=%d",
+				  function_size, data_size);
+		break;
+
+	default:
+		DMSG("Not supported ddr_type=%d", ddr_type);
+		return TEE_ERROR_GENERIC;
+	}
+
+	/* Total size to reserve for the busfreq function and its data */
+	page_size = function_size + data_size;
+
+	if (soc_is_imx6sx())
+		ocram_start += IRAM_6SX_S_SIZE - page_size;
+	else if ((ocram_start + page_size) > tlb_ocram) {
+		DMSG("Busfreq required %d bytes, available %ld bytes",
+			 page_size, (tlb_ocram - ocram_start));
+		return TEE_ERROR_GENERIC;
+	}
+
+	if (!soc_is_imx6sx())
+		pm_ocram_free_area += page_size;
+
+	/* Initialize the reserved page(s) with 0 */
+	memset((void *)ocram_start, 0, page_size);
+	/* Copy the DDR Change function in Secure non-DDR memory */
+	memcpy((void *)ocram_start, (void *)((uint32_t)change_ddr_freq),
+			function_size);
+
+	/*
+	 * Initialize address of ddr_info and change_ddr_freq function
+	 * to be in OCRAM
+	 */
+	ddr_info = (struct busfreq_info_mx6 *)(ocram_start + function_size);
+	change_ddr_freq = (change_ddr_freq_func)(ocram_start);
+
+	if (ddr_type == IMX_DDR_TYPE_DDR3)
+		imx6_ddr3_info_setup();
+
+	/* Clean D$ to ensure physical memory is correct */
+	dcache_clean_range((void *)ocram_start, page_size);
+
+	return TEE_SUCCESS;
+}
+
+/**
+ * @brief   Change the bus frequency on the i.MX6 device
+ *          to the requested frequency \a freq and
+ *          switch the DLL off if \a dll_off = 1
+ *
+ * @param[in] freq     Frequency to switch to
+ * @param[in] dll_off  switch DLL off or not
+ *
+ * @retval  TEE_SUCCESS              No error
+ * @retval  TEE_ERROR_NOT_SUPPORTED  Feature not supported
+ */
+TEE_Result imx6_busfreq_change(uint32_t freq, uint32_t dll_off)
+{
+	uint32_t cpsr;
+	uint32_t cpsr_if;
+
+	DMSG("Change DDR frequency to %d Hz DLL %s",
+			freq, (dll_off ? "OFF" : "ON"));
+
+	if (!ddr_info)
+		return TEE_ERROR_NOT_SUPPORTED;
+
+	/* Disable interrupts */
+	cpsr = read_cpsr();
+	write_cpsr(cpsr | (CPSR_I | CPSR_F));
+	isb();
+
+	/* Save the I/F bits */
+	cpsr_if = cpsr & (CPSR_I | CPSR_F);
+
+	ddr_info->freq		= freq;
+	ddr_info->dll_off	= dll_off;
+	dsb();
+
+	change_ddr_freq(ddr_info);
+
+	/* Restore cpsr I/F bits. Bit = 0 => enabled */
+	cpsr = read_cpsr();
+	cpsr &= ~(CPSR_I | CPSR_F);
+	cpsr |= cpsr_if;
+	write_cpsr(cpsr);
+
+	return TEE_SUCCESS;
+}
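+
+/*
+ * Usage sketch (illustrative only, not part of the driver): once
+ * imx6_busfreq_init() has installed the routine in OCRAM, a caller can
+ * request a low bus frequency with, for example:
+ *
+ *	if (imx6_busfreq_change(24000000, 1) != TEE_SUCCESS)
+ *		EMSG("Bus frequency change not available");
+ */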
diff --git a/core/drivers/pm/imx/busfreq/busfreq_imx6.h b/core/drivers/pm/imx/busfreq/busfreq_imx6.h
new file mode 100644
index 000000000..62253da8c
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_imx6.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_imx6.h
+ *
+ * @brief   Busfreq imx6 header.
+ */
+#ifndef __BUSFREQ_IMX6_H__
+#define __BUSFREQ_IMX6_H__
+
+#ifndef __ASSEMBLER__
+/**
+ * @brief   Definition of the structure used during the bus frequency
+ *          operation.
+ *          This structure must be kept in sync with the assembly code
+ *          offset definitions.
+ */
+struct busfreq_info_mx6 {
+	uint32_t cpu_type;
+	uint32_t freq;
+	uint32_t dll_off;
+	uint32_t (*ddr_settings)[2];
+	uint32_t ddr_settings_size;
+	uint32_t (*ddr_calibration)[2];
+	uint32_t ddr_calibration_size;
+	uint32_t (*iomux_offsets)[2];
+	uint32_t iomux_offsets_size;
+	uint32_t ccm_base;
+	uint32_t iomux_base;
+	uint32_t mmdc_base;
+	uint32_t pl310_v_base;
+	uint32_t ttbr1;
+} __aligned(8);
+
+TEE_Result imx6_busfreq_init(void);
+TEE_Result imx6_busfreq_change(uint32_t freq, uint32_t dll_off);
+
+void imx6_ddr3_freq_change(struct busfreq_info_mx6 *info);
+size_t get_imx6_ddr3_freq_change_size(void);
+void imx6_lpddr2_freq_change(struct busfreq_info_mx6 *info);
+size_t get_imx6_lpddr2_freq_change_size(void);
+#endif
+/*
+ * Define the CPU ID bit mask needed in the Assembly code
+ */
+#define BUSFREQ_CPU_MX6UL		BIT32(0)
+#define BUSFREQ_CPU_MX6QP		BIT32(1)
+#define BUSFREQ_CPU_MX6SX		BIT32(2)
+
+#endif
diff --git a/core/drivers/pm/imx/busfreq/busfreq_imx6_defines.c b/core/drivers/pm/imx/busfreq/busfreq_imx6_defines.c
new file mode 100644
index 000000000..8768287ae
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_imx6_defines.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_imx6_defines.c
+ *
+ * @brief   Implementation of the bus frequency scaling.\n
+ *          Definition of the struct busfreq_info_mx6 field to
+ *          be shared with the assembly code
+ */
+
+/* Global includes */
+#include <gen-asm-defines.h>
+#include <tee_api_types.h>
+
+/* Local include */
+#include "busfreq_imx6.h"
+
+/*
+ * Define the offset of the structure busfreq_info_mx6
+ * defined in the busfreq_imx6.h file
+ */
+DEFINES
+{
+	DEFINE(BUSFREQ_INFO_CPU_TYPE,
+		offsetof(struct busfreq_info_mx6, cpu_type));
+	DEFINE(BUSFREQ_INFO_FREQ,
+		offsetof(struct busfreq_info_mx6, freq));
+	DEFINE(BUSFREQ_INFO_DLL_OFF,
+		offsetof(struct busfreq_info_mx6, dll_off));
+	DEFINE(BUSFREQ_INFO_DDR_SETTINGS,
+		offsetof(struct busfreq_info_mx6, ddr_settings));
+	DEFINE(BUSFREQ_INFO_DDR_SETTINGS_SIZE,
+		offsetof(struct busfreq_info_mx6, ddr_settings_size));
+	DEFINE(BUSFREQ_INFO_DDR_CALIBRATION,
+		offsetof(struct busfreq_info_mx6, ddr_calibration));
+	DEFINE(BUSFREQ_INFO_DDR_CALIBRATION_SIZE,
+		offsetof(struct busfreq_info_mx6, ddr_calibration_size));
+	DEFINE(BUSFREQ_INFO_IOMUX_OFFSETS,
+		offsetof(struct busfreq_info_mx6, iomux_offsets));
+	DEFINE(BUSFREQ_INFO_IOMUX_OFFSETS_SIZE,
+		offsetof(struct busfreq_info_mx6, iomux_offsets_size));
+	DEFINE(BUSFREQ_INFO_CCM_BASE,
+		offsetof(struct busfreq_info_mx6, ccm_base));
+	DEFINE(BUSFREQ_INFO_IOMUX_BASE,
+		offsetof(struct busfreq_info_mx6, iomux_base));
+	DEFINE(BUSFREQ_INFO_MMDC_BASE,
+		offsetof(struct busfreq_info_mx6, mmdc_base));
+	DEFINE(BUSFREQ_INFO_PL310_V_BASE,
+		offsetof(struct busfreq_info_mx6, pl310_v_base));
+	DEFINE(BUSFREQ_INFO_TTBR1_OFF,
+		offsetof(struct busfreq_info_mx6, ttbr1));
+}
+
diff --git a/core/drivers/pm/imx/busfreq/busfreq_imx7.c b/core/drivers/pm/imx/busfreq/busfreq_imx7.c
new file mode 100644
index 000000000..c87d1ea93
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_imx7.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_imx7.c
+ *
+ * @brief   Implementation of the bus frequency scaling.\n
+ *          Interface and initialization.
+ */
+/* Standard includes */
+#include <string.h>
+
+/* Global includes */
+#include <kernel/cache_helpers.h>
+#include <mm/core_memprot.h>
+#include <tee_api_types.h>
+
+/* Platform includes */
+#include <platform_config.h>
+#include <imx.h>
+#include <imx_pm.h>
+
+/* Local include */
+#include "busfreq_imx7.h"
+
+/* Local data */
+typedef void (*change_ddr_freq_func)(struct busfreq_info_mx7 *);
+static change_ddr_freq_func change_ddr_freq;
+static struct busfreq_info_mx7 *ddr_info;
+
+/**
+ * @brief   Setup the DDR info struct used in the busfreq assembly
+ *          function.
+ */
+static void info_setup(void)
+{
+	ddr_info->ddr_type = imx_get_ddr_type();
+
+	ddr_info->rev = imx_soc_rev_minor();
+
+	/* Initialize the registers base address */
+	ddr_info->ccm_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC,
+					     CCM_SIZE);
+	ddr_info->iomux_base = core_mmu_get_va(IOMUXC_GPR_BASE, MEM_AREA_IO_SEC,
+					       IOMUXC_SIZE);
+	ddr_info->ddrc_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC,
+					      MMDC_P0_SIZE);
+	ddr_info->ddrc_phy_base = core_mmu_get_va(DDRC_PHY_BASE,
+						  MEM_AREA_IO_SEC,
+						  DDRC_PHY_SIZE);
+}
+
+/**
+ * @brief   Install the busfreq assembly function into the OCRAM
+ *          and fill the busfreq data
+ *
+ * @retval  TEE_SUCCESS         Success
+ * @retval  TEE_ERROR_GENERIC   Generic Error
+ */
+TEE_Result imx7_busfreq_init(void)
+{
+	int ddr_type;
+
+	size_t function_size = 0;
+	size_t data_size = 0;
+	size_t page_size;
+
+	uint32_t ocram_start = core_mmu_get_va(
+				imx_get_ocram_tz_start_addr() +
+				BUSFREQ_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+				BUSFREQ_MAX_SIZE);
+
+	/* Get the type of memory used */
+	ddr_type = imx_get_ddr_type();
+
+	change_ddr_freq = &imx7_ddr_freq_change;
+	function_size   = get_imx7_ddr_freq_change_size();
+	data_size       = sizeof(struct busfreq_info_mx7);
+
+	if (ddr_type == IMX_DDR_TYPE_DDR3) {
+		DMSG("DDR3 mem function size=%d, data size=%d",
+			function_size, data_size);
+	} else if (ddr_type == IMX_DDR_TYPE_LPDDR2) {
+		DMSG("LPDDR2 mem function size=%d, data size=%d",
+			function_size, data_size);
+	} else {
+		DMSG("LPDDR3 mem function size=%d, data size=%d",
+			function_size, data_size);
+	}
+
+	/* Total size to reserve for the busfreq function and its data */
+	page_size = function_size + data_size;
+
+	/*
+	 * Check if there is enough place to install
+	 * the busfreq data and function
+	 */
+	if (page_size > BUSFREQ_MAX_SIZE) {
+		DMSG("Busfreq required %d bytes, available %d bytes",
+			 page_size, BUSFREQ_MAX_SIZE);
+		return TEE_ERROR_GENERIC;
+	}
+
+	/* Initialize the reserved page(s) with 0 */
+	memset((void *)ocram_start, 0, page_size);
+	/* Copy the DDR Change function in Secure non-DDR memory */
+	memcpy((void *)ocram_start, (void *)((uint32_t)change_ddr_freq),
+			function_size);
+
+	/*
+	 * Initialize address of ddr_info and change_ddr_freq function
+	 * to be in OCRAM
+	 */
+	ddr_info = (struct busfreq_info_mx7 *)(ocram_start + function_size);
+	change_ddr_freq = (change_ddr_freq_func)(ocram_start);
+
+	info_setup();
+
+	/* Clean D$ to ensure physical memory is correct */
+	dcache_clean_range((void *)ocram_start, page_size);
+
+	return TEE_SUCCESS;
+
+}
+
+/**
+ * @brief   Change the bus frequency on the i.MX7 device
+ *          to the requested frequency \a freq and
+ *          switch the DLL off if \a dll_off = 1
+ *
+ * @param[in] freq     Frequency to switch to
+ * @param[in] dll_off  switch DLL off or not - not used
+ *
+ * @retval  TEE_SUCCESS              No error
+ * @retval  TEE_ERROR_NOT_SUPPORTED  Feature not supported
+ */
+TEE_Result imx7_busfreq_change(uint32_t freq, uint32_t dll_off __unused)
+{
+	uint32_t cpsr;
+	uint32_t cpsr_if;
+
+	DMSG("Change DDR frequency to %d Hz", freq);
+
+	if (!ddr_info)
+		return TEE_ERROR_NOT_SUPPORTED;
+
+	/* Disable interrupts */
+	cpsr = read_cpsr();
+	write_cpsr(cpsr | (CPSR_I | CPSR_F));
+	isb();
+
+	/* Save the I/F bits */
+	cpsr_if = cpsr & (CPSR_I | CPSR_F);
+
+	ddr_info->freq = freq;
+	dsb();
+
+	change_ddr_freq(ddr_info);
+
+	/* Restore cpsr I/F bits. Bit = 0 => enabled */
+	cpsr = read_cpsr();
+	cpsr &= ~(CPSR_I | CPSR_F);
+	cpsr |= cpsr_if;
+	write_cpsr(cpsr);
+
+	return TEE_SUCCESS;
+}
diff --git a/core/drivers/pm/imx/busfreq/busfreq_imx7.h b/core/drivers/pm/imx/busfreq/busfreq_imx7.h
new file mode 100644
index 000000000..61c86ea2b
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_imx7.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_imx7.h
+ *
+ * @brief   Busfreq imx7 header.
+ */
+
+#ifndef __BUSFREQ_IMX7_H__
+#define __BUSFREQ_IMX7_H__
+
+#ifndef __ASSEMBLER__
+/**
+ * @brief   Definition of the structure used during the bus frequency
+ *          operation.
+ *          This structure must be kept in sync with the assembly code
+ *          offset definitions.
+ */
+struct busfreq_info_mx7 {
+	uint32_t freq;
+	uint16_t ddr_type;
+	uint16_t rev;
+	uint32_t ccm_base;
+	uint32_t iomux_base;
+	uint32_t ddrc_base;
+	uint32_t ddrc_phy_base;
+} __aligned(8);
+
+
+TEE_Result imx7_busfreq_init(void);
+TEE_Result imx7_busfreq_change(uint32_t freq, uint32_t dll_off);
+
+void imx7_ddr_freq_change(struct busfreq_info_mx7 *info);
+size_t get_imx7_ddr_freq_change_size(void);
+#endif
+
+#endif
diff --git a/core/drivers/pm/imx/busfreq/busfreq_imx7_defines.c b/core/drivers/pm/imx/busfreq/busfreq_imx7_defines.c
new file mode 100644
index 000000000..3ff1e4177
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_imx7_defines.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_imx7_defines.c
+ *
+ * @brief   Implementation of the bus frequency scaling.\n
+ *          Definition of the struct busfreq_info_mx7 field to
+ *          be shared with the assembly code
+ */
+
+/* Global includes */
+#include <gen-asm-defines.h>
+#include <tee_api_types.h>
+
+/* Local include */
+#include "busfreq_imx7.h"
+
+/*
+ * Define the offset of the structure busfreq_info_mx7
+ * defined in the busfreq_imx7.h file
+ */
+DEFINES
+{
+	DEFINE(BUSFREQ_INFO_FREQ,
+		offsetof(struct busfreq_info_mx7, freq));
+	DEFINE(BUSFREQ_INFO_DDR_TYPE,
+		offsetof(struct busfreq_info_mx7, ddr_type));
+	DEFINE(BUSFREQ_INFO_REV,
+		offsetof(struct busfreq_info_mx7, rev));
+	DEFINE(BUSFREQ_INFO_CCM_BASE,
+		offsetof(struct busfreq_info_mx7, ccm_base));
+	DEFINE(BUSFREQ_INFO_IOMUX_BASE,
+		offsetof(struct busfreq_info_mx7, iomux_base));
+	DEFINE(BUSFREQ_INFO_DDRC_BASE,
+		offsetof(struct busfreq_info_mx7, ddrc_base));
+	DEFINE(BUSFREQ_INFO_DDRC_PHY_BASE,
+		offsetof(struct busfreq_info_mx7, ddrc_phy_base));
+}
+
diff --git a/core/drivers/pm/imx/busfreq/busfreq_lpddr2_imx6.S b/core/drivers/pm/imx/busfreq/busfreq_lpddr2_imx6.S
new file mode 100644
index 000000000..bf2eb29cb
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_lpddr2_imx6.S
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_lpddr2_imx6.S
+ *
+ * @brief   Implementation of the bus frequency LPDDR2
+ *          assembly function on i.MX6 devices
+ */
+
+/* Global includes */
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+
+/* Generated includes */
+#include <generated/busfreq_imx6_defines.h>
+
+/* Local includes */
+#include "busfreq_imx6.h"
+
+#include "busfreq_macro_imx6.S"
+
+	.macro set_timings_below_100MHz_operation
+	/* Set MMDCx_MISC[RALAT] = 2 cycles */
+	ldr	r0, [r9, #MX6_MMDC_MDMISC]
+	bic	r0, r0, #(0x7 << 6)
+	orr	r0, r0, #(0x2 << 6)
+	str	r0, [r9, #MX6_MMDC_MDMISC]
+
+	/* Adjust LPDDR2 timings for 24 MHz operation */
+	ldr	r1, =0x03032073
+	str	r1, [r9, #MX6_MMDC_MDCFG0]
+	ldr	r2, =0x00020482
+	str	r2, [r9, #MX6_MMDC_MDCFG1]
+	ldr	r3, =0x00000049
+	str	r3, [r9, #MX6_MMDC_MDCFG2]
+	ldr	r4, =0x00020333
+	str	r4, [r9, #MX6_MMDC_MDCFG3LP]
+
+	/* Check if lpddr2 channel 1 is enabled */
+	cmp r8, #0
+	beq	skip_below_100Mhz_ch1_timings
+
+	ldr	r0, [r8, #MX6_MMDC_MDMISC]
+	bic	r0, r0, #(0x7 << 6)
+	orr	r0, r0, #(0x2 << 6)
+	str	r0, [r8, #MX6_MMDC_MDMISC]
+
+	str	r1, [r8, #MX6_MMDC_MDCFG0]
+	str	r2, [r8, #MX6_MMDC_MDCFG1]
+	str	r3, [r8, #MX6_MMDC_MDCFG2]
+	str	r4, [r8, #MX6_MMDC_MDCFG3LP]
+
+skip_below_100Mhz_ch1_timings:
+
+	.endm
+
+	.macro set_timings_above_100MHz_operation
+	/* Set MMDCx_MISC[RALAT] = 5 cycles */
+	ldr	r0, [r9, #MX6_MMDC_MDMISC]
+	bic	r0, r0, #(0x7 << 6)
+	orr	r0, r0, #(0x5 << 6)
+	str	r0, [r9, #MX6_MMDC_MDMISC]
+
+	/* Adjust LPDDR2 timings for 400 MHz operation */
+	ldr	r1, =0x33374133
+	str	r1, [r9, #MX6_MMDC_MDCFG0]
+	ldr	r2, =0x00100A82
+	str	r2, [r9, #MX6_MMDC_MDCFG1]
+	ldr	r3, =0x00000093
+	str	r3, [r9, #MX6_MMDC_MDCFG2]
+	ldr	r4, =0x001A0889
+	str	r4, [r9, #MX6_MMDC_MDCFG3LP]
+
+	/* Check if lpddr2 channel 1 is enabled */
+	cmp r8, #0
+	beq	skip_above_100Mhz_ch1_timings
+
+	ldr	r0, [r8, #MX6_MMDC_MDMISC]
+	bic	r0, r0, #(0x7 << 6)
+	orr	r0, r0, #(0x5 << 6)
+	str	r0, [r8, #MX6_MMDC_MDMISC]
+
+	str	r1, [r8, #MX6_MMDC_MDCFG0]
+	str	r2, [r8, #MX6_MMDC_MDCFG1]
+	str	r3, [r8, #MX6_MMDC_MDCFG2]
+	str	r4, [r8, #MX6_MMDC_MDCFG3LP]
+
+skip_above_100Mhz_ch1_timings:
+
+	.endm
+
+	.macro	mmdc_clk_lower_100MHz
+
+	is_mx6qp
+	bne	1f
+	set_timings_below_100MHz_operation
+
+1:
+	/*
+	 * Prior to reducing the DDR frequency (at 528/400 MHz),
+	 * read the Measure unit count bits (MU_UNIT_DEL_NUM)
+	 */
+	ldr r5, =MX6_MMDC_MPMUR0
+	ldr	r0, [r9, r5]
+	/* Original MU unit count */
+	mov	r0, r0, LSR #16
+	ldr	r1, =0x3FF
+	and	r0, r0, r1
+	/* Original MU unit count * 2 */
+	mov	r2, r0, LSL #1
+	/*
+	 * Bypass the automatic measure unit when below 100 MHz
+	 * by setting the Measure unit bypass enable bit (MU_BYP_EN)
+	 */
+	ldr	r0, [r9, r5]
+	orr	r0, r0, #0x400
+	str	r0, [r9, r5]
+	/*
+	 * Double the measure count value read in step 1 and program it in the
+	 * measurement bypass bits (MU_BYP_VAL) of the MMDC PHY Measure Unit
+	 * Register for the reduced frequency operation below 100 MHz
+	 */
+	ldr	r0, [r9, r5]
+	ldr	r1, =0x3FF
+	bic	r0, r0, r1
+	orr	r0, r0, r2
+	str	r0, [r9, r5]
+
+	is_mx6qp
+	bne	not_6qp_clk_lower_100MHz
+
+	/* Now perform a Force Measurement. */
+	ldr	r0, [r9, r5]
+	orr	r0, r0, #0x800
+	str	r0, [r9, r5]
+	/* Wait for FRC_MSR to clear. */
+force_measure:
+	ldr	r0, [r9, r5]
+	and	r0, r0, #0x800
+	cmp	r0, #0x0
+	bne	force_measure
+
+	/* Check if lpddr2 channel 2 is enabled */
+	cmp r8, #0
+	beq	end_clk_lower_100MHz
+
+	ldr	r0, [r8, r5]
+	/* Original MU unit count */
+	mov	r0, r0, LSR #16
+	ldr	r1, =0x3FF
+	and	r0, r0, r1
+	/* Original MU unit count * 2 */
+	mov	r2, r0, LSL #1
+	/*
+	 * Bypass the automatic measure unit when below 100 MHz
+	 * by setting the Measure unit bypass enable bit (MU_BYP_EN)
+	 */
+	ldr	r0, [r8, r5]
+	orr	r0, r0, #0x400
+	str	r0, [r8, r5]
+	/*
+	 * Double the measure count value read in step 1 and program it in the
+	 * measurement bypass bits (MU_BYP_VAL) of the MMDC PHY Measure Unit
+	 * Register for the reduced frequency operation below 100 MHz
+	 */
+	ldr	r0, [r8, r5]
+	ldr	r1, =0x3FF
+	bic	r0, r0, r1
+	orr	r0, r0, r2
+	str	r0, [r8, r5]
+	/* Now perform a Force Measurement. */
+	ldr	r0, [r8, r5]
+	orr	r0, r0, #0x800
+	str	r0, [r8, r5]
+	/* Wait for FRC_MSR to clear. */
+force_measure_ch1:
+	ldr	r0, [r8, r5]
+	and	r0, r0, #0x800
+	cmp	r0, #0x0
+	bne	force_measure_ch1
+
+	b	end_clk_lower_100MHz
+
+not_6qp_clk_lower_100MHz:
+	/* For freq lower than 100MHz, need to set RALAT to 2 */
+	ldr	r0, [r9, #MX6_MMDC_MDMISC]
+	bic	r0, r0, #(0x7 << 6)
+	orr	r0, r0, #(0x2 << 6)
+	str	r0, [r9, #MX6_MMDC_MDMISC]
+
+end_clk_lower_100MHz:
+	.endm
+
+	.macro	mmdc_clk_above_100MHz
+
+	is_mx6qp
+	bne	1f
+	set_timings_above_100MHz_operation
+
+1:
+
+	/* Make sure that the PHY measurement unit is NOT in bypass mode */
+	ldr	r5, =MX6_MMDC_MPMUR0
+	ldr	r0, [r9, r5]
+	bic	r0, r0, #0x400
+	str	r0, [r9, r5]
+	/* Now perform a Force Measurement. */
+	ldr	r0, [r9, r5]
+	orr	r0, r0, #0x800
+	str	r0, [r9, r5]
+	/* Wait for FRC_MSR to clear. */
+force_measure1:
+	ldr	r0, [r9, r5]
+	and	r0, r0, #0x800
+	cmp	r0, #0x0
+	bne	force_measure1
+
+	/* Check if lpddr2 channel 2 is enabled */
+	cmp r8, #0
+	beq	skip_above_force_measure_ch1
+
+	ldr	r0, [r8, r5]
+	bic	r0, r0, #0x400
+	str	r0, [r8, r5]
+	/* Now perform a Force Measurement. */
+	ldr	r0, [r8, r5]
+	orr	r0, r0, #0x800
+	str	r0, [r8, r5]
+	/* Wait for FRC_MSR to clear. */
+force_measure1_ch1:
+	ldr	r0, [r8, r5]
+	and	r0, r0, #0x800
+	cmp	r0, #0x0
+	bne	force_measure1_ch1
+
+skip_above_force_measure_ch1:
+	is_mx6ul_or_sx
+	/* For freq above 100MHz, need to set RALAT back to 5 */
+	ldreq	r0, [r9, #MX6_MMDC_MDMISC]
+	biceq	r0, r0, #(0x7 << 6)
+	orreq	r0, r0, #(0x5 << 6)
+	streq	r0, [r9, #MX6_MMDC_MDMISC]
+
+	.endm
+
+/**
+ * @brief   Put the i.MX6 LPDDR2 in self-refresh, change its clock,
+ *          then exit self-refresh.
+ *          IRQs must be disabled.
+ *
+ * @param[in/out] busfreq_info  busfreq data
+ */
+	.align	3
+
+FUNC imx6_lpddr2_freq_change , :
+	push	{r1-r12, lr}
+
+	/* Save the input parameters */
+	push	{r0}
+
+	ldr		r11, =cpu_mmu_disable_dcache
+	mov		lr, pc
+	mov		pc, r11
+
+	pop		{r12}
+
+#ifdef CFG_PL310
+	/* Disable the L2$ */
+	ldr		r0, [r12, #BUSFREQ_INFO_PL310_V_BASE]
+	arm_cl2_disable
+#endif
+
+bypass_l2_disable:
+	tlb_set_to_ocram r12
+
+	/* Keep r8, r9, r10, and r12 as is */
+	mov r12, r0
+	ldr	r9,  [r12, #BUSFREQ_INFO_MMDC_BASE]
+	ldr	r10, [r12, #BUSFREQ_INFO_CCM_BASE]
+
+	/* Ensure TLB preloaded */
+	ldr r0, [r10]
+
+	is_mx6ul_or_sx
+	moveq	r8, #0
+	beq	1f
+
+	/* r8 = MMDC Port 2 if port 2 is used, else r8 = 0 */
+	/* Check if lpddr2 channel 2 is enabled */
+	ldr r8, =MX6_MMDC2_OFFSET
+	add r8, r8, r9
+	ldr	r0, [r9, #MX6_MMDC_MDMISC]
+	ands r0, r0, #(1 << 2)
+	moveq r8, #0
+
+1:
+	/* Disable Automatic power savings. */
+	ldr	r0, [r9, #MX6_MMDC_MAPSR]
+	orr	r0, r0, #0x1
+	str	r0, [r9, #MX6_MMDC_MAPSR]
+
+	/* MMDC0_MDPDC disable power down timer */
+	ldr	r0, [r9, #MX6_MMDC_MDPDC]
+	bic	r0, r0, #(0xFF << 8)
+	str	r0, [r9, #MX6_MMDC_MDPDC]
+
+	/* Check if lpddr2 channel 2 is enabled */
+	cmp r8, #0
+	beq	skip_psd_ch1
+
+	ldr	r0, [r8, #MX6_MMDC_MAPSR]
+	orr	r0, r0, #0x1
+	str	r0, [r8, #MX6_MMDC_MAPSR]
+
+	ldr	r0, [r8, #MX6_MMDC_MDPDC]
+	bic	r0, r0, #(0xFF << 8)
+	str	r0, [r8, #MX6_MMDC_MDPDC]
+
+skip_psd_ch1:
+	/* delay for a while */
+	do_wait #10
+
+	/* Make the DDR explicitly enter self-refresh. */
+	enter_dvfs
+
+	/* set SBS step-by-step mode */
+	ldr	r0, [r9, #MX6_MMDC_MADPCR0]
+	orr	r0, r0, #0x100
+	str	r0, [r9, #MX6_MMDC_MADPCR0]
+
+	/* Check if lpddr2 channel 2 is enabled */
+	cmp r8, #0
+	beq	skip_sbs_ch1
+
+	enter_dvfs r8
+
+	ldr	r0, [r8, #MX6_MMDC_MADPCR0]
+	orr	r0, r0, #0x100
+	str	r0, [r8, #MX6_MMDC_MADPCR0]
+
+skip_sbs_ch1:
+	ldr r7, [r12, #BUSFREQ_INFO_FREQ]
+	ldr	r1, =FREQ_100MHZ
+	cmp	r7, r1
+	bgt	set_ddr_mu_above_100
+
+	is_mx6ul_or_sx
+	bne	do_clk_lower_100MHz
+
+	/* Check if the DLL is off */
+	ldr	r0, [r12, #BUSFREQ_INFO_DLL_OFF]
+	cmp	r0, #1
+	beq	set_ddr_mu_above_100
+
+do_clk_lower_100MHz:
+	mmdc_clk_lower_100MHz
+
+set_ddr_mu_above_100:
+	ldr	r1, =FREQ_24MHZ
+	cmp	r7, r1
+	beq	set_to_24MHz
+
+	ldr	r1, =FREQ_100MHZ
+	cmp	r7, r1
+	beq set_to_100MHz
+
+	ldr r1, =FREQ_400MHZ
+	cmp r7, r1
+	switch_to_400MHz
+	b	done
+
+set_to_24MHz:
+	switch_to_24MHz
+	b	done
+
+set_to_100MHz:
+	switch_to_100MHz
+
+done:
+	ldr	r1,=FREQ_100MHZ
+	cmp	r7, r1
+	blt	skip_mmdc_clk_check
+	mmdc_clk_above_100MHz
+
+skip_mmdc_clk_check:
+
+	/* clear DVFS - exit from self refresh mode */
+	exit_dvfs
+
+	/* Enable Automatic power savings. */
+	ldr	r0, [r9, #MX6_MMDC_MAPSR]
+	bic	r0, r0, #0x1
+	str	r0, [r9, #MX6_MMDC_MAPSR]
+
+	/* Check if lpddr2 channel 2 is enabled */
+	cmp r8, #0
+	beq	skip_enable_psd_ch1
+
+	/* clear DVFS - exit self refresh mode. */
+	exit_dvfs r8
+
+	ldr	r0, [r8, #MX6_MMDC_MAPSR]
+	bic	r0, r0, #0x1
+	str	r0, [r8, #MX6_MMDC_MAPSR]
+
+skip_enable_psd_ch1:
+	ldr	r1, =FREQ_24MHZ
+	cmp	r7, r1
+	beq	skip_power_down
+
+	/* Enable MMDC power down timer. */
+	ldr	r0, [r9, #MX6_MMDC_MDPDC]
+	orr	r0, r0, #0x5500
+	str	r0, [r9, #MX6_MMDC_MDPDC]
+
+	/* Check if lpddr2 channel 2 is enabled */
+	cmp r8, #0
+	beq	skip_power_down
+
+	ldr	r0, [r8, #MX6_MMDC_MDPDC]
+	orr	r0, r0, #0x5500
+	str	r0, [r8, #MX6_MMDC_MDPDC]
+
+skip_power_down:
+	/* clear SBS - unblock DDR accesses */
+	ldr	r0, [r9, #MX6_MMDC_MADPCR0]
+	bic	r0, r0, #0x100
+	str	r0, [r9, #MX6_MMDC_MADPCR0]
+
+	/* Check if lpddr2 channel 2 is enabled */
+	cmp r8, #0
+	beq	skip_disable_sbs_ch1
+
+	ldr	r0, [r8, #MX6_MMDC_MADPCR0]
+	bic	r0, r0, #0x100
+	str	r0, [r8, #MX6_MMDC_MADPCR0]
+
+skip_disable_sbs_ch1:
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	tlb_back_to_ddr r12
+
+#ifdef CFG_PL310
+	/* Enable the L2$ */
+	ldr		r0, [r12, #BUSFREQ_INFO_PL310_V_BASE]
+	ldr		r11, =arm_cl2_enable
+	mov		lr, pc
+	mov		pc, r11
+#endif
+
+bypass_l2_enable:
+	/* Enable D$ */
+	read_sctlr	r2
+	orr	r2, r2, #SCTLR_C
+	write_sctlr	r2
+	isb
+
+	pop	{r1-r12, lr}
+
+	/* Restore registers */
+	mov	pc, lr
+
+	/* Ensure that all constants will be stored here */
+	.ltorg
+
+	.equ	FREQ_400MHZ, 400000000
+	.equ	FREQ_100MHZ, 100000000
+	.equ	FREQ_24MHZ,   24000000
+
+END_FUNC imx6_lpddr2_freq_change
+
+
+FUNC get_imx6_lpddr2_freq_change_size , :
+	subs	r0, pc, #8
+	ldr		r1, =imx6_lpddr2_freq_change
+	sub		r0, r0, r1
+	bx		lr
+END_FUNC get_imx6_lpddr2_freq_change_size
+
diff --git a/core/drivers/pm/imx/busfreq/busfreq_macro_imx6.S b/core/drivers/pm/imx/busfreq/busfreq_macro_imx6.S
new file mode 100644
index 000000000..1ae78f6bb
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/busfreq_macro_imx6.S
@@ -0,0 +1,644 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq_macro_imx6.S
+ *
+ * @brief   Implementation of the bus frequency common macro
+ *          on i.MX6 devices
+ */
+
+/* Registers includes */
+#include <imx-regs.h>
+#include <kernel/tz_ssvce_def.h>
+
+#ifdef CFG_PL310
+	.macro arm_cl2_disable
+loop_dis_sync:
+	ldr	r1, [r0, #PL310_SYNC]
+	cmp	r1, #0
+	bne	loop_dis_sync
+
+	mov	r1, #1
+	str	r1, [r0, #PL310_SYNC]
+wait_dis_sync:
+	ldr	r1, [r0, #PL310_SYNC]
+	cmp	r1, #0
+	bne	wait_dis_sync
+
+	/* disable PL310 ctrl -> only set lsb bit */
+	mov	r1, #0x0
+	str	r1, [r0, #PL310_CTRL]
+
+	dsb
+	isb
+	.endm
+#endif
+
+.extern iram_tlb_phys_addr
+
+	.macro tlb_set_to_ocram base
+	/* save ttbr */
+	read_ttbr1 r7
+	str	r7, [\base, #BUSFREQ_INFO_TTBR1_OFF]
+
+	/*
+	 * To ensure no page table walks occur in DDR, we
+	 * have a another page table stored in IRAM that only
+	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
+	 * We need to set the TTBR1 to the new IRAM TLB.
+	 * Do the following steps:
+	 * 1. Flush the Branch Target Address Cache (BTAC)
+	 * 2. Set TTBR1 to point to IRAM page table.
+	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
+	 * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0
+	 *     and 2-4G is translated by TTBR1.
+	 */
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r7, [r6]
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #SCTLR_Z
+	write_sctlr r6
+
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1/TTBR0 */
+	write_ttbr1 r7
+
+	/* Read TTBCR and set PD0=1 and PD1=0 */
+	/* Warning: unknown behaviour if LPAE is enabled */
+	read_ttbcr r6
+	bic	r6, r6, #(TTBCR_PD1 | TTBCR_PD0)
+	orr	r6, r6, #TTBCR_PD0
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+	.endm
+
+	.macro tlb_back_to_ddr base
+	/* Restore the TTBCR */
+	dsb
+	isb
+
+	/* Read TTBCR and set PD0=0 and PD1=0 */
+	read_ttbcr r6
+	bic	r6, r6, #(TTBCR_PD1 | TTBCR_PD0)
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	dsb
+	isb
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #SCTLR_Z
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* restore ttbr */
+	ldr	r6, [\base, #BUSFREQ_INFO_TTBR1_OFF]
+	write_ttbr1 r6
+
+	isb
+	.endm
+
+
+	.macro is_mx6qp
+	ldr	r0, [r12, #BUSFREQ_INFO_CPU_TYPE]
+	ldr	r1, =BUSFREQ_CPU_MX6QP
+	cmp	r0, r1
+	.endm
+
+	.macro is_mx6ul_or_sx
+	ldr	r0, [r12, #BUSFREQ_INFO_CPU_TYPE]
+	ldr	r1, =BUSFREQ_CPU_MX6UL
+	cmp	r0, r1
+	ldrne	r1, =BUSFREQ_CPU_MX6SX
+	cmpne	r0, r1
+	.endm
+
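+	/*
+	 * Simple delay loop: performs \delay iterations of dummy MMDC
+	 * register reads.
+	 */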
+	.macro do_wait delay
+	mov	r1, \delay
+1:
+	mov	r2, #MX6_MMDC_MDCTL
+2:
+	ldr	r0, [r9, r2]
+	add	r2, r2, #4
+	cmp	r2, #16
+	bne	2b
+	sub	r1, r1, #1
+	cmp	r1, #0
+	bgt	1b
+	.endm
+
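+	/*
+	 * Assert (set) or de-assert (clr) the MMDC configuration request
+	 * MDSCR[CON_REQ] and wait for the configuration acknowledge
+	 * MDSCR[CON_ACK] to follow.
+	 */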
+	.macro con_req_ack_set
+	mov	r0, #(1 << 15)
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+1:
+	ldr	r0, [r9, #MX6_MMDC_MDSCR]
+	ands	r0, r0, #(1 << 14)
+	beq	1b
+	.endm
+
+	.macro con_req_ack_clr
+	mov	r0, #0
+	str	r0, [r9, #MX6_MMDC_MDSCR]
+1:
+	ldr	r0, [r9, #MX6_MMDC_MDSCR]
+	ands	r0, r0, #(1 << 14)
+	bne	1b
+	.endm
+
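+	/*
+	 * DVFS/self-refresh helpers: MAPSR bit 21 requests self-refresh
+	 * entry or exit, bit 25 is the corresponding acknowledge (DVACK)
+	 * from the MMDC.
+	 */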
+	.macro dvfs_ack_set base=r9
+1:
+	ldr	r0, [\base, #MX6_MMDC_MAPSR]
+	ands	r0, r0, #(1 << 25)
+	beq	1b
+	.endm
+
+	.macro dvfs_ack_clr base=r9
+1:
+	ldr	r0, [\base, #MX6_MMDC_MAPSR]
+	ands	r0, r0, #(1 << 25)
+	bne	1b
+	.endm
+
+	.macro enter_dvfs base=r9
+	ldr	r0, [\base, #MX6_MMDC_MAPSR]
+	orr	r0, r0, #(1 << 21)
+	str	r0, [\base, #MX6_MMDC_MAPSR]
+
+	dvfs_ack_set \base
+	.endm
+
+	.macro exit_dvfs base=r9
+	ldr	r0, [\base, #MX6_MMDC_MAPSR]
+	bic	r0, r0, #(1 << 21)
+	str	r0, [\base, #MX6_MMDC_MAPSR]
+
+	dvfs_ack_clr \base
+	.endm
+
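+	/*
+	 * Wait until the CCM divider handshake register (CDHIPR) reports
+	 * that no divider or mux update is still in progress.
+	 */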
+	.macro wait_ccm_handshake
+100:
+	ldr	r0, [r10, #CCM_CDHIPR]
+	cmp	r0, #0
+	bne	100b
+	.endm
+
+	.macro	switch_to_528MHz
+
+	/* check if periph_clk_sel is already set */
+	ldr	r0, [r10, #CCM_CBCDR]
+	and	r0, r0, #(1 << 25)
+	cmp	r0, #(1 << 25)
+	beq	set_ahb_podf_before_switch_528
+
+	/* change periph_clk to be sourced from pll3_clk. */
+	ldr	r0, [r10, #CCM_CBCMR]
+	bic	r0, r0, #(3 << 12)
+	str	r0, [r10, #CCM_CBCMR]
+
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x38 << 20)
+	str	r0, [r10, #CCM_CBCDR]
+
+	/*
+	 * set the AHB dividers before the switch,
+	 * don't change AXI clock divider,
+	 * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
+	 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ldr	r2, =0x3f1f00
+	bic	r0, r0, r2
+	orr	r0, r0, #0xd00
+	orr	r0, r0, #(1 << 16)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/* now switch periph_clk to pll3_main_clk. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	orr	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	b	switch_pre_periph_clk_528
+
+set_ahb_podf_before_switch_528:
+	/*
+	 * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
+	 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ldr	r2, =0x3f1f00
+	bic	r0, r0, r2
+	orr	r0, r0, #0xd00
+	orr	r0, r0, #(1 << 16)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+switch_pre_periph_clk_528:
+
+	/* now switch pre_periph_clk to PLL2_528MHz. */
+	ldr	r0, [r10, #CCM_CBCMR]
+	bic	r0, r0, #(0xC << 16)
+	str	r0, [r10, #CCM_CBCMR]
+
+	/* now switch periph_clk back. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	.endm
+
+	.macro	switch_to_400Mz_UL_SX
+	/* check whether periph2_clk is already from top path */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ands	r0, #(1 << 26)
+	beq	skip_periph2_clk2_switch_400_UL_SX
+
+	/* now switch periph2_clk back. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(1 << 26)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/*
+	 * on i.MX6SX, pre_periph2_clk will be always from
+	 * pll2_pfd2, so no need to set pre_periph2_clk
+	 * parent, just set the mmdc divider directly.
+	 */
+skip_periph2_clk2_switch_400_UL_SX:
+
+	/* fabric_mmdc_podf to 0 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x7 << 3)
+	str	r0, [r10, #CCM_CBCDR]
+
+	.endm
+
+	.macro	switch_to_400MHz
+
+	is_mx6qp
+	beq	switch_to_400MHz_6QP
+
+	switch_to_400Mz_UL_SX
+	b	switch_to_400Mz_exit
+
+switch_to_400MHz_6QP:
+	/* check if periph_clk_sel is already set. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	and	r0, r0, #(1 << 25)
+	cmp	r0, #(1 << 25)
+	beq	set_ahb_podf_before_switch1
+
+	/* change periph_clk to be sourced from pll3_clk. */
+	ldr	r0, [r10, #CCM_CBCMR]
+	bic	r0, r0, #(3 << 12)
+	str	r0, [r10, #CCM_CBCMR]
+
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x38 << 24)
+	str	r0, [r10, #CCM_CBCDR]
+
+	/* now switch periph_clk to pll3_main_clk. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	orr	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	b	switch_pre_periph_clk_400
+
+set_ahb_podf_before_switch1:
+	/*
+	 * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
+	 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ldr	r2, =0x3f1f00
+	bic	r0, r0, r2
+	orr	r0, r0, #(0x9 << 8)
+	orr	r0, r0, #(1 << 16)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+switch_pre_periph_clk_400:
+
+	/* now switch pre_periph_clk to PFD_400MHz. */
+	ldr	r0, [r10, #CCM_CBCMR]
+	bic	r0, r0, #(0xc << 16)
+	orr	r0, r0, #(0x4 << 16)
+	str	r0, [r10, #CCM_CBCMR]
+
+	/* now switch periph_clk back. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/*
+	 * change AHB divider so that we are at 400/3=133MHz.
+	 * don't change AXI clock divider.
+	 * set the MMDC_DIV=1, AXI_DIV=2, AHB_DIV=3,
+	 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ldr	r2, =0x3f1f00
+	bic	r0, r0, r2
+	orr	r0, r0, #(0x9 << 8)
+	orr	r0, r0, #(1 << 16)
+	str	r0, [r10, #CCM_CBCDR]
+
+switch_to_400Mz_exit:
+	wait_ccm_handshake
+
+	.endm
+
+	.macro	switch_to_100MHz_UL_SX
+	/* check whether periph2_clk is from top path */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ands	r0, #(1 << 26)
+	beq	skip_periph2_clk2_switch_100_UL_SX
+
+	/* now switch periph2_clk back. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(1 << 26)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/*
+	 * on i.MX6SX, pre_periph2_clk will be always from
+	 * pll2_pfd2, so no need to set pre_periph2_clk
+	 * parent, just set the mmdc divider directly.
+	 */
+skip_periph2_clk2_switch_100_UL_SX:
+
+	/* fabric_mmdc_podf to 3 so that mmdc is 400 / 4 = 100MHz */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x7 << 3)
+	orr	r0, r0, #(0x3 << 3)
+	str	r0, [r10, #CCM_CBCDR]
+
+	.endm
+
+	.macro	switch_to_100MHz
+
+	is_mx6qp
+	beq	switch_to_100MHz_6QP
+
+	switch_to_100MHz_UL_SX
+	b	switch_to_100MHz_exit
+
+switch_to_100MHz_6QP:
+	/* check if periph_clk_sel is already set. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	and	r0, r0, #(1 << 25)
+	cmp	r0, #(1 << 25)
+	beq	switch_pre_periph_clk_100
+	/*
+	 * Temporarily source periph_clk from pll3_main_clk while
+	 * pre_periph_clk is reprogrammed to PLL2_PFD_200M; make sure
+	 * pll3 is selected for periph_clk2 and its divider is 1.
+	 */
+	ldr	r0, [r10, #CCM_CBCMR]
+	bic	r0, r0, #(0x3 << 12)
+	str	r0, [r10, #CCM_CBCMR]
+
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x38 << 24)
+	str	r0, [r10, #CCM_CBCDR]
+
+	/* now switch periph_clk to pll3_main_clk. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	orr	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+switch_pre_periph_clk_100:
+	/* now switch pre_periph_clk to PFD_200MHz. */
+	ldr	r0, [r10, #CCM_CBCMR]
+	orr	r0, r0, #(0xc << 16)
+	str	r0, [r10, #CCM_CBCMR]
+
+	/* set the MMDC_DIV=2, AXI_DIV=4, AHB_DIV=8 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ldr	r1, =0x3f1f00
+	bic	r0, r0, r1
+	orr	r0, r0, #(0x8 << 16)
+	orr	r0, r0, #(0x3 << 16)
+
+	/*
+	 * if changing AHB divider remember to change
+	 * the IPGPER divider too below.
+	 */
+	orr	r0, r0, #0x1d00
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/* now switch periph_clk back. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+switch_to_100MHz_exit:
+	wait_ccm_handshake
+
+	.endm
+
+	.macro	switch_to_50MHz_UL_SX
+
+	/* check whether periph2_clk is already from top path */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ands	r0, #(1 << 26)
+	beq	skip_periph2_clk2_switch_50_UL_SX
+
+	/* now switch periph2_clk back. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(1 << 26)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/*
+	 * pre_periph2_clk is always sourced from pll2_pfd2, so there is
+	 * no need to set the pre_periph2_clk parent; just set the MMDC
+	 * divider directly.
+	 */
+skip_periph2_clk2_switch_50_UL_SX:
+
+	/* fabric_mmdc_podf to 7 so that mmdc is 400 / 8 = 50MHz */
+	ldr	r0, [r10, #CCM_CBCDR]
+	orr	r0, r0, #(0x7 << 3)
+	str	r0, [r10, #CCM_CBCDR]
+
+	.endm
+
+	.macro	switch_to_50MHz
+
+	is_mx6qp
+	beq	switch_to_50MHz_6QP
+
+	switch_to_50MHz_UL_SX
+	b	switch_to_50MHz_exit
+
+switch_to_50MHz_6QP:
+	/* check if periph_clk_sel is already set. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	and	r0, r0, #(1 << 25)
+	cmp	r0, #(1 << 25)
+	beq	switch_pre_periph_clk_50
+
+	/*
+	 * Temporarily source periph_clk from pll3_main_clk while
+	 * pre_periph_clk is reprogrammed to PLL2_PFD_200M; make sure
+	 * pll3 is selected for periph_clk2 and its divider is 1.
+	 */
+	ldr	r0, [r10, #CCM_CBCMR]
+	bic	r0, r0, #(0x3 << 12)
+	str	r0, [r10, #CCM_CBCMR]
+
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x38 << 24)
+	str	r0, [r10, #CCM_CBCDR]
+
+	/* now switch periph_clk to pll3_main_clk. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	orr	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+switch_pre_periph_clk_50:
+
+	/* now switch pre_periph_clk to PFD_200MHz. */
+	ldr	r0, [r10, #CCM_CBCMR]
+	orr	r0, r0, #(0xc << 16)
+	str	r0, [r10, #CCM_CBCMR]
+
+	/*
+	 * set the MMDC_DIV=4, AXI_DIV = 4, AHB_DIV=8,
+	 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ldr	r2, =0x3f1f00
+	bic	r0, r0, r2
+	orr	r0, r0, #(0x18 << 16)
+	orr	r0, r0, #(0x3 << 16)
+
+	/*
+	 * if changing AHB divider remember to change
+	 * the IPGPER divider too below.
+	 */
+	orr	r0, r0, #0x1d00
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/* now switch periph_clk back. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+switch_to_50MHz_exit:
+	wait_ccm_handshake
+
+	.endm
+
+	.macro	switch_to_24MHz_UL_SX
+	/* Set periph_clk2_sel to OSC_CLK */
+	ldr	r0, [r10, #CCM_CBCMR]
+	orr	r0, r0, #(1 << 20)
+	str	r0, [r10, #CCM_CBCMR]
+
+	/* Set periph2_clk2_podf to 0 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #0x7
+	str	r0, [r10, #CCM_CBCDR]
+
+	/* Set periph2_clk sel to periph2_clk2 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	orr	r0, r0, #(0x1 << 26)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/* Set fabric_mmdc_podf to 0 */
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x7 << 3)
+	str	r0, [r10, #CCM_CBCDR]
+
+	.endm
+
+	.macro	switch_to_24MHz
+
+	is_mx6qp
+	beq switch_to_24MHz_6QP
+
+	switch_to_24MHz_UL_SX
+	b	switch_to_24MHz_exit
+
+switch_to_24MHz_6QP:
+	/*
+	 * change the freq now try setting DDR to 24MHz.
+	 * source it from the periph_clk2 ensure the
+	 * periph_clk2 is sourced from 24MHz and the
+	 * divider is 1.
+	 */
+
+	ldr	r0, [r10, #CCM_CBCMR]
+	bic	r0, r0, #(0x3 << 12)
+	orr	r0, r0, #(1 << 12)
+	str	r0, [r10, #CCM_CBCMR]
+
+	ldr	r0, [r10, #CCM_CBCDR]
+	bic	r0, r0, #(0x38 << 24)
+	str	r0, [r10, #CCM_CBCDR]
+
+	/* now switch periph_clk to 24MHz. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	orr	r0, r0, #(1 << 25)
+	str	r0, [r10, #CCM_CBCDR]
+
+	wait_ccm_handshake
+
+	/* change all the dividers to 1. */
+	ldr	r0, [r10, #CCM_CBCDR]
+	ldr	r2, =0x3f1f00
+	bic	r0, r0, r2
+	orr	r0, r0, #(1 << 8)
+	str	r0, [r10, #CCM_CBCDR]
+
+switch_to_24MHz_exit:
+	wait_ccm_handshake
+	.endm
diff --git a/core/drivers/pm/imx/busfreq/sub.mk b/core/drivers/pm/imx/busfreq/sub.mk
new file mode 100644
index 000000000..f1867e5bd
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/sub.mk
@@ -0,0 +1,9 @@
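+# ./.. puts the parent directory (core/drivers/pm/imx) on the include path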
+incdirs-y += ./..
+
+srcs-y += busfreq.c utils.S
+
+srcs-$(CFG_MX6) += busfreq_imx6.c busfreq_ddr3_imx6.S busfreq_lpddr2_imx6.S
+asm-defines-$(CFG_MX6) += busfreq_imx6_defines.c
+
+srcs-$(CFG_MX7) += busfreq_imx7.c busfreq_asm_imx7.S
+asm-defines-$(CFG_MX7) += busfreq_imx7_defines.c
diff --git a/core/drivers/pm/imx/busfreq/utils.S b/core/drivers/pm/imx/busfreq/utils.S
new file mode 100644
index 000000000..e0f3ca674
--- /dev/null
+++ b/core/drivers/pm/imx/busfreq/utils.S
@@ -0,0 +1,27 @@
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <keep.h>
+#include <kernel/cache_helpers.h>
+
+/* void cpu_mmu_disable_dcache(void) - disable data cache */
+FUNC cpu_mmu_disable_dcache , :
+	push	{r12, lr}
+UNWIND(	.save	{r12, lr})
+
+	mov     r0, #DCACHE_OP_CLEAN_INV
+	bl	dcache_op_all
+
+	/* Disable Cache */
+	read_sctlr r0
+	bic	r0, r0, #SCTLR_C
+	write_sctlr r0
+	isb
+	dsb
+
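+	/*
+	 * Clean/invalidate once more now that the cache is off so that no
+	 * stale lines remain before execution continues uncached.
+	 */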
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	bl	dcache_op_all
+
+	pop	{r12, pc}
+END_FUNC cpu_mmu_disable_dcache
+DECLARE_KEEP_PAGER cpu_mmu_disable_dcache
diff --git a/core/drivers/pm/imx/cpuidle/cpuidle-imx6sl.c b/core/drivers/pm/imx/cpuidle/cpuidle-imx6sl.c
new file mode 100644
index 000000000..ae8f1c369
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/cpuidle-imx6sl.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <arm.h>
+#include <console.h>
+#include <drivers/imx_scu.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx_pl310.h>
+#include <imx_pm.h>
+#include <imx.h>
+#include <kernel/boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <string.h>
+#include <sm/psci.h>
+#include <sm/pm.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <util.h>
+
+/*
+ * Cpuidle and suspend share the same resources, because low-power idle
+ * and suspend cannot be active at the same time.
+ */
+
+int imx6sl_cpuidle_init(void)
+{
+	uint32_t i;
+	const uint32_t *mmdc_io_offset_array;
+	uint32_t lowpower_idle_ocram_base = (uint32_t)phys_to_virt(
+			imx_get_ocram_tz_start_addr() +
+			LOWPOWER_IDLE_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx6_pm_info *p =
+			(struct imx6_pm_info *)lowpower_idle_ocram_base;
+	struct imx6_pm_data *pm_data;
+
+	DMSG("idle init");
+
+	dcache_op_level1(DCACHE_OP_CLEAN_INV);
+
+	p->pa_base = imx_get_ocram_tz_start_addr() + LOWPOWER_IDLE_OCRAM_OFFSET;
+	p->tee_resume = (paddr_t)virt_to_phys((void *)(vaddr_t)v7_cpu_resume);
+	p->pm_info_size = sizeof(*p);
+	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC, CCM_SIZE);
+	p->ccm_pa_base = CCM_BASE;
+	p->mmdc0_va_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC,
+					   MMDC_P0_SIZE);
+	p->mmdc0_pa_base = MMDC_P0_BASE;
+	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC, SRC_SIZE);
+	p->src_pa_base = SRC_BASE;
+	p->iomuxc_va_base = core_mmu_get_va(IOMUXC_BASE, MEM_AREA_IO_SEC,
+					    IOMUXC_SIZE);
+	p->iomuxc_pa_base = IOMUXC_BASE;
+	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC, GPC_SIZE);
+	p->gpc_pa_base = GPC_BASE;
+	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC,
+					    ANATOP_SIZE);
+	p->anatop_pa_base = ANATOP_BASE;
+	p->pl310_va_base = core_mmu_get_va(PL310_BASE, MEM_AREA_IO_SEC,
+					   PL310_SIZE);
+
+	pm_data = &imx6sl_pm_data;
+
+	p->mmdc_io_num = pm_data->mmdc_io_num;
+	mmdc_io_offset_array = pm_data->mmdc_io_offset;
+
+	for (i = 0; i < p->mmdc_io_num; i++)
+		p->mmdc_io_val[i][0] = mmdc_io_offset_array[i];
+
+	memcpy((void *)(lowpower_idle_ocram_base + sizeof(*p)),
+		(void *)(vaddr_t)imx6sl_low_power_idle,
+	       LOWPOWER_IDLE_OCRAM_SIZE - sizeof(*p));
+
+	dcache_clean_range((void *)lowpower_idle_ocram_base,
+			    LOWPOWER_IDLE_OCRAM_SIZE);
+	/*
+	 * Note: the IRAM is mapped as secure IO here; if it is ever
+	 * remapped as normal memory, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	return 0;
+}
+
+static int lowpoweridle_init;
+
+int imx6sl_lowpower_idle(uint32_t power_state,
+			 uintptr_t entry,
+			 uint32_t context_id __unused,
+			 struct sm_nsec_ctx *nsec)
+{
+	int ret;
+	/*
+	 * TODO: move this code to a platform init place; note that the
+	 * kernel pm-imx6.c must also be changed so it does not use the LPRAM.
+	 */
+	uint32_t cpuidle_ocram_base = (uint32_t)phys_to_virt(
+			imx_get_ocram_tz_start_addr() +
+			LOWPOWER_IDLE_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx6_pm_info *p = (struct imx6_pm_info *)cpuidle_ocram_base;
+
+	/*
+	 * TODO:
+	 * Check power_state?
+	 */
+	if (!lowpoweridle_init) {
+		imx6sl_cpuidle_init();
+		lowpoweridle_init = 1;
+	}
+
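+	/*
+	 * power_state is passed to the OCRAM idle code through idle_state;
+	 * the assembly parses bit 1 (vbus/ldo status) and bit 2 (audio
+	 * bus-freq mode) from it.
+	 */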
+	p->idle_state = power_state;
+
+	/* Store non-sec ctx regs */
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(cpuidle_ocram_base + sizeof(*p)));
+
+	/*
+	 * cpu_suspend may return without having actually suspended; check
+	 * its return value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+		return 0;
+	}
+
+	/* Restore the registers of the different modes in the secure world */
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	/* Back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
+	boot_primary_init_intc();
+
+#ifdef CFG_PL310
+	if (pl310_enabled(pl310_base()))
+		return 0;
+
+	arm_cl2_config(pl310_base());
+	arm_cl2_invbyway(pl310_base());
+	arm_cl2_enable(pl310_base());
+	arm_cl2_invbyway(pl310_base());
+#endif
+
+	DMSG("=== Back from Suspended ===\n");
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/cpuidle/cpuidle-imx6sll.c b/core/drivers/pm/imx/cpuidle/cpuidle-imx6sll.c
new file mode 100644
index 000000000..23730099d
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/cpuidle-imx6sll.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <arm.h>
+#include <console.h>
+#include <drivers/imx_scu.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx_pl310.h>
+#include <imx_pm.h>
+#include <imx.h>
+#include <kernel/boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <string.h>
+#include <sm/psci.h>
+#include <sm/pm.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <util.h>
+
+/*
+ * Cpuidle and suspend share the same resources, because low-power idle
+ * and suspend cannot be active at the same time.
+ */
+
+int imx6sll_cpuidle_init(void)
+{
+	uint32_t i;
+	const uint32_t *mmdc_io_offset_array;
+	uint32_t lowpower_idle_ocram_base = (uint32_t)phys_to_virt(
+			imx_get_ocram_tz_start_addr() +
+			LOWPOWER_IDLE_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx6_pm_info *p =
+			(struct imx6_pm_info *)lowpower_idle_ocram_base;
+	struct imx6_pm_data *pm_data;
+
+	dcache_op_level1(DCACHE_OP_CLEAN_INV);
+
+	p->pa_base = imx_get_ocram_tz_start_addr() + LOWPOWER_IDLE_OCRAM_OFFSET;
+	p->tee_resume = (paddr_t)virt_to_phys((void *)(vaddr_t)v7_cpu_resume);
+	p->pm_info_size = sizeof(*p);
+	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC, CCM_SIZE);
+	p->ccm_pa_base = CCM_BASE;
+	p->mmdc0_va_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC,
+					   MMDC_P0_SIZE);
+	p->mmdc0_pa_base = MMDC_P0_BASE;
+	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC, SRC_SIZE);
+	p->src_pa_base = SRC_BASE;
+	p->iomuxc_va_base = core_mmu_get_va(IOMUXC_BASE, MEM_AREA_IO_SEC,
+					    IOMUXC_SIZE);
+	p->iomuxc_pa_base = IOMUXC_BASE;
+	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC, GPC_SIZE);
+	p->gpc_pa_base = GPC_BASE;
+	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC,
+					    ANATOP_SIZE);
+	p->anatop_pa_base = ANATOP_BASE;
+	p->pl310_va_base = core_mmu_get_va(PL310_BASE, MEM_AREA_IO_SEC,
+					   PL310_SIZE);
+
+	pm_data = &imx6sll_pm_data;
+
+	p->mmdc_io_num = pm_data->mmdc_io_num;
+	mmdc_io_offset_array = pm_data->mmdc_io_offset;
+
+	for (i = 0; i < p->mmdc_io_num; i++)
+		p->mmdc_io_val[i][0] = mmdc_io_offset_array[i];
+
+	memcpy((void *)(lowpower_idle_ocram_base + sizeof(*p)),
+		(void *)(vaddr_t)imx6sll_low_power_idle,
+	       LOWPOWER_IDLE_OCRAM_SIZE - sizeof(*p));
+
+	dcache_clean_range((void *)lowpower_idle_ocram_base,
+			    LOWPOWER_IDLE_OCRAM_SIZE);
+	/*
+	 * Note: the IRAM is mapped as secure IO here; if it is ever
+	 * remapped as normal memory, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	return 0;
+}
+
+static int lowpoweridle_init;
+
+int imx6sll_lowpower_idle(uint32_t power_state __unused,
+			 uintptr_t entry,
+			 uint32_t context_id __unused,
+			 struct sm_nsec_ctx *nsec)
+{
+	int ret;
+	/*
+	 * TODO: move this code to a platform init place; note that the
+	 * kernel pm-imx6.c must also be changed so it does not use the LPRAM.
+	 */
+	uint32_t cpuidle_ocram_base = (uint32_t)phys_to_virt(
+			imx_get_ocram_tz_start_addr() +
+			LOWPOWER_IDLE_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx6_pm_info *p = (struct imx6_pm_info *)cpuidle_ocram_base;
+
+	/*
+	 * TODO:
+	 * Check power_state?
+	 */
+	if (!lowpoweridle_init) {
+		imx6sll_cpuidle_init();
+		lowpoweridle_init = 1;
+	}
+
+	/* Store non-sec ctx regs */
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(cpuidle_ocram_base + sizeof(*p)));
+
+	/*
+	 * cpu_suspend may return without having actually suspended; check
+	 * its return value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+		return 0;
+	}
+
+	/* Restore the registers of the different modes in the secure world */
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	if (scu_init())
+		return -1;
+
+	/* After enabling the SCU, flush the cache so the other cores can see the data */
+	dcache_op_all(DCACHE_OP_CLEAN_INV);
+
+	/* Back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
+	boot_primary_init_intc();
+
+#ifdef CFG_PL310
+	if (pl310_enabled(pl310_base()))
+		return 0;
+
+	arm_cl2_config(pl310_base());
+	arm_cl2_invbyway(pl310_base());
+	arm_cl2_enable(pl310_base());
+	arm_cl2_invbyway(pl310_base());
+#endif
+
+	DMSG("=== Back from Suspended ===\n");
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/cpuidle/cpuidle-imx6sx.c b/core/drivers/pm/imx/cpuidle/cpuidle-imx6sx.c
new file mode 100644
index 000000000..7c7c7242e
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/cpuidle-imx6sx.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <arm.h>
+#include <console.h>
+#include <drivers/imx_scu.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx_pl310.h>
+#include <imx_pm.h>
+#include <imx.h>
+#include <kernel/boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <string.h>
+#include <sm/psci.h>
+#include <sm/pm.h>
+#include <util.h>
+
+static int (*lowpower_idle_func)(uint32_t);
+static struct imx_pm_asm_arg idle_arg;
+static int lowpoweridle_init;
+
+int imx6sx_cpuidle_init(void)
+{
+	uint32_t func_size;
+
+	lowpower_idle_func = (int (*)(uint32_t))(pm_ocram_free_area);
+
+	func_size = get_imx6sx_low_power_idle_size();
+
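+	/* Reuse the pm_info that the suspend path (suspend_arg) already points to */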
+	idle_arg.pa_addr = virt_to_phys((void *)(vaddr_t)lowpower_idle_func);
+	idle_arg.pm_info = suspend_arg.pm_info;
+
+	memcpy((void *)(vaddr_t)lowpower_idle_func,
+	       (void *)(vaddr_t)imx6sx_low_power_idle, func_size);
+
+	pm_ocram_free_area += func_size;
+	dcache_clean_range((void *)(vaddr_t)lowpower_idle_func,
+			   (pm_ocram_free_area -
+				(vaddr_t)lowpower_idle_func + 4));
+	/*
+	 * Note: the IRAM is mapped as secure IO here; if it is ever
+	 * remapped as normal memory, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	lowpoweridle_init = 1;
+
+	return 0;
+}
+
+int imx6sx_lowpower_idle(uint32_t power_state __unused,
+			 uintptr_t entry,
+			 uint32_t context_id __unused,
+			 struct sm_nsec_ctx *nsec)
+{
+	int ret;
+
+	DMSG("=== Enter Low Power Idle ===\n");
+	if (!lowpoweridle_init) {
+		imx6sx_cpuidle_init();
+		lowpoweridle_init = 1;
+	}
+
+	/* Store non-sec ctx regs */
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	ret = sm_pm_cpu_suspend((uint32_t)&idle_arg, lowpower_idle_func);
+
+	/*
+	 * cpu_suspend may return without having actually suspended; check
+	 * its return value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+		return 0;
+	}
+
+	/* Restore the registers of the different modes in the secure world */
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	if (scu_init())
+		return -1;
+
+	/* After enabling the SCU, flush the cache so the other cores can see the data */
+	dcache_op_all(DCACHE_OP_CLEAN_INV);
+
+	/* Back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
+	boot_primary_init_intc();
+
+#ifdef CFG_PL310
+	if (pl310_enabled(pl310_base()))
+		return 0;
+
+	arm_cl2_config(pl310_base());
+	arm_cl2_invbyway(pl310_base());
+	arm_cl2_enable(pl310_base());
+	/* Do we need to lock? cpu performance? */
+	/*arm_cl2_lockallways(pl310_base()); */
+	arm_cl2_invbyway(pl310_base());
+#endif
+
+	DMSG("=== Back from Low Power Idle ===\n");
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/cpuidle/cpuidle-imx6ul.c b/core/drivers/pm/imx/cpuidle/cpuidle-imx6ul.c
new file mode 100644
index 000000000..2fe6cd431
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/cpuidle-imx6ul.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <arm.h>
+#include <console.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx_pm.h>
+#include <imx.h>
+#include <kernel/boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <string.h>
+#include <sm/psci.h>
+#include <sm/pm.h>
+#include <util.h>
+
+/*
+ * Cpuidle and suspend share the same resources, because low-power idle
+ * and suspend cannot be active at the same time.
+ */
+
+int imx6ul_cpuidle_init(void)
+{
+	uint32_t i;
+	const uint32_t *mmdc_io_offset_array;
+	uint32_t lowpower_idle_ocram_base = (uint32_t)phys_to_virt(
+			imx_get_ocram_tz_start_addr() +
+			LOWPOWER_IDLE_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx6_pm_info *p =
+			(struct imx6_pm_info *)lowpower_idle_ocram_base;
+	struct imx6_pm_data *pm_data;
+
+	dcache_op_level1(DCACHE_OP_CLEAN_INV);
+
+	p->pa_base = imx_get_ocram_tz_start_addr() + LOWPOWER_IDLE_OCRAM_OFFSET;
+	p->tee_resume = (paddr_t)virt_to_phys((void *)(vaddr_t)v7_cpu_resume);
+	p->pm_info_size = sizeof(*p);
+	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC, CCM_SIZE);
+	p->ccm_pa_base = CCM_BASE;
+	p->mmdc0_va_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC,
+					   MMDC_P0_SIZE);
+	p->mmdc0_pa_base = MMDC_P0_BASE;
+	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC, SRC_SIZE);
+	p->src_pa_base = SRC_BASE;
+	p->iomuxc_va_base = core_mmu_get_va(IOMUXC_BASE, MEM_AREA_IO_SEC,
+					    IOMUXC_SIZE);
+	p->iomuxc_pa_base = IOMUXC_BASE;
+	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC, GPC_SIZE);
+	p->gpc_pa_base = GPC_BASE;
+	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC,
+					    ANATOP_SIZE);
+	p->anatop_pa_base = ANATOP_BASE;
+
+	pm_data = &imx6ul_pm_data;
+
+	p->mmdc_io_num = pm_data->mmdc_io_num;
+	mmdc_io_offset_array = pm_data->mmdc_io_offset;
+
+	for (i = 0; i < p->mmdc_io_num; i++)
+		p->mmdc_io_val[i][0] = mmdc_io_offset_array[i];
+
+	memcpy((void *)(lowpower_idle_ocram_base + sizeof(*p)),
+#if defined(CFG_MX6UL)
+		(void *)(vaddr_t)imx6ul_low_power_idle,
+#elif defined(CFG_MX6ULL)
+		(void *)(vaddr_t)imx6ull_low_power_idle,
+#endif
+	       LOWPOWER_IDLE_OCRAM_SIZE - sizeof(*p));
+
+	dcache_clean_range((void *)lowpower_idle_ocram_base,
+			   LOWPOWER_IDLE_OCRAM_SIZE);
+	/*
+	 * Note: the IRAM is mapped as secure IO here; if it is ever
+	 * remapped as normal memory, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	return 0;
+}
+
+static int lowpoweridle_init;
+
+int imx6ul_lowpower_idle(uint32_t power_state __unused,
+			 uintptr_t entry,
+			 uint32_t context_id __unused,
+			 struct sm_nsec_ctx *nsec)
+{
+	int ret;
+	/*
+	 * TODO: move this code to a platform init place; note that the
+	 * kernel pm-imx6.c must also be changed so it does not use the LPRAM.
+	 */
+	uint32_t cpuidle_ocram_base = (uint32_t)phys_to_virt(
+			imx_get_ocram_tz_start_addr() +
+			LOWPOWER_IDLE_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx6_pm_info *p = (struct imx6_pm_info *)cpuidle_ocram_base;
+
+	/*
+	 * TODO:
+	 * Check power_state?
+	 */
+	if (!lowpoweridle_init) {
+		imx6ul_cpuidle_init();
+		lowpoweridle_init = 1;
+	}
+
+	/* Store non-sec ctx regs */
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(cpuidle_ocram_base + sizeof(*p)));
+	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(cpuidle_ocram_base + sizeof(*p)));
+	/*
+	 * cpu_suspend may return without having actually suspended; check
+	 * its return value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+		return 0;
+	}
+
+	/* Restore the registers of the different modes in the secure world */
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	/* Back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
+	boot_primary_init_intc();
+
+	DMSG("=== Back from Suspended ===\n");
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/cpuidle/cpuidle-imx7d.c b/core/drivers/pm/imx/cpuidle/cpuidle-imx7d.c
new file mode 100644
index 000000000..41ceeb3fc
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/cpuidle-imx7d.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2018, 2020 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <arm.h>
+#include <arm32.h>
+#include <atomic.h>
+#include <console.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <sm/optee_smc.h>
+#include <sm/psci.h>
+#include <sm/pm.h>
+#include <sm/sm.h>
+#include <string.h>
+#include <util.h>
+
+/*
+ * Cpuidle and suspend share the same resources, because low-power idle
+ * and suspend cannot be active at the same time.
+ */
+
+int imx7d_cpuidle_init(void)
+{
+	uint32_t lowpower_idle_ocram_base = (uint32_t)phys_to_virt(
+			imx_get_ocram_tz_start_addr() +
+			LOWPOWER_IDLE_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx7_pm_info *p =
+		(struct imx7_pm_info *)lowpower_idle_ocram_base;
+
+	dcache_op_level1(DCACHE_OP_CLEAN_INV);
+
+	p->va_base = lowpower_idle_ocram_base;
+	p->pa_base = imx_get_ocram_tz_start_addr() + LOWPOWER_IDLE_OCRAM_OFFSET;
+	p->tee_resume = (paddr_t)virt_to_phys((void *)(vaddr_t)v7_cpu_resume);
+	p->pm_info_size = sizeof(*p);
+	p->ddrc_va_base = core_mmu_get_va(DDRC_BASE, MEM_AREA_IO_SEC, 1);
+	p->ddrc_pa_base = DDRC_BASE;
+	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC, 1);
+	p->ccm_pa_base = CCM_BASE;
+	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC, 1);
+	p->anatop_pa_base = ANATOP_BASE;
+	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC, 1);
+	p->src_pa_base = SRC_BASE;
+	p->iomuxc_gpr_va_base = core_mmu_get_va(IOMUXC_GPR_BASE,
+						MEM_AREA_IO_SEC, 1);
+	p->iomuxc_gpr_pa_base = IOMUXC_GPR_BASE;
+	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC, 1);
+	p->gpc_pa_base = GPC_BASE;
+	p->gic_va_base = core_mmu_get_va(GIC_BASE, MEM_AREA_IO_SEC, 1);
+	p->gic_pa_base = GIC_BASE;
+
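+	/*
+	 * num_lpi_cpus counts the cores currently in low-power idle;
+	 * num_online_cpus is refreshed on every idle entry.
+	 */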
+	p->num_lpi_cpus = 0;
+	p->num_online_cpus = -1;
+
+	memcpy((void *)(lowpower_idle_ocram_base + sizeof(*p)),
+		(void *)(vaddr_t)imx7d_low_power_idle,
+	       LOWPOWER_IDLE_OCRAM_SIZE - sizeof(*p));
+
+	dcache_clean_range((void *)lowpower_idle_ocram_base,
+			   LOWPOWER_IDLE_OCRAM_SIZE);
+	/*
+	 * Note: the IRAM is mapped as secure IO here; if it is ever
+	 * remapped as normal memory, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	return 0;
+}
+
+static int lowpoweridle_init;
+
+static void imx_pen_lock(uint32_t cpu)
+{
+	uint32_t cpuidle_ocram_base = (uint32_t)phys_to_virt(
+					imx_get_ocram_tz_start_addr() +
+					LOWPOWER_IDLE_OCRAM_OFFSET,
+					MEM_AREA_TEE_COHERENT,
+					LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx7_pm_info *p =
+		(struct imx7_pm_info *)cpuidle_ocram_base;
+
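+	/*
+	 * Peterson-style mutual exclusion between the two cores: each core
+	 * raises its flag and writes its ID to val, and the last writer
+	 * spins until the other core either lowers its flag or updates val.
+	 */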
+	if (cpu == 0) {
+		atomic_store_u32(&p->flag0, 1);
+		dsb();
+		atomic_store_u32(&p->val, cpu);
+		do {
+			dsb();
+		} while (atomic_load_u32(&p->flag1) == 1 &&
+			 atomic_load_u32(&p->val) == cpu);
+	} else {
+		atomic_store_u32(&p->flag1, 1);
+		dsb();
+		atomic_store_u32(&p->val, cpu);
+		do {
+			dsb();
+		} while (atomic_load_u32(&p->flag0) == 1 &&
+			 atomic_load_u32(&p->val) == cpu);
+	}
+}
+
+static void imx_pen_unlock(int cpu)
+{
+	uint32_t cpuidle_ocram_base = (uint32_t)phys_to_virt(
+					imx_get_ocram_tz_start_addr() +
+					LOWPOWER_IDLE_OCRAM_OFFSET,
+					MEM_AREA_TEE_COHERENT,
+					LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx7_pm_info *p =
+		(struct imx7_pm_info *)cpuidle_ocram_base;
+
+	dsb();
+	if (cpu == 0)
+		atomic_store_u32(&p->flag0, 0);
+	else
+		atomic_store_u32(&p->flag1, 0);
+}
+
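+/*
+ * The i.MX7D has two Cortex-A7 cores; core 1 counts as online when its
+ * enable bit in SRC_A7RCR1 is set.
+ */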
+static uint32_t get_online_cpus(void)
+{
+	vaddr_t src_a7rcr1 = core_mmu_get_va(SRC_BASE + SRC_A7RCR1,
+					     MEM_AREA_IO_SEC, sizeof(uint32_t));
+	uint32_t val = io_read32(src_a7rcr1);
+
+	return (val & (1 << SRC_A7RCR1_A7_CORE1_ENABLE_OFFSET)) ? 2 : 1;
+}
+
+int imx7d_lowpower_idle(uint32_t power_state,
+			uintptr_t entry,
+			uint32_t context_id __unused,
+			struct sm_nsec_ctx *nsec)
+{
+	int ret;
+	uint32_t cpuidle_ocram_base = (uint32_t)phys_to_virt(
+					imx_get_ocram_tz_start_addr() +
+					LOWPOWER_IDLE_OCRAM_OFFSET,
+					MEM_AREA_TEE_COHERENT,
+					LOWPOWER_IDLE_OCRAM_SIZE);
+	struct imx7_pm_info *p =
+			(struct imx7_pm_info *)cpuidle_ocram_base;
+	uint32_t type = (power_state & PSCI_POWER_STATE_TYPE_MASK) >>
+		PSCI_POWER_STATE_TYPE_SHIFT;
+	static unsigned int gic_inited;
+	uint32_t cpu_id = get_core_pos();
+
+	imx_pen_lock(cpu_id);
+
+	if (!lowpoweridle_init) {
+		imx7d_cpuidle_init();
+		lowpoweridle_init = 1;
+	}
+
+	if (type != PSCI_POWER_STATE_TYPE_POWER_DOWN)
+		panic();
+
+	p->num_online_cpus = get_online_cpus();
+	p->num_lpi_cpus++;
+
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(cpuidle_ocram_base + sizeof(*p)));
+
+	/*
+	 * cpu_suspend may return without having actually suspended; check
+	 * its return value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		p->num_lpi_cpus--;
+		imx_pen_unlock(cpu_id);
+		DMSG("=== Not suspended, GPC IRQ Pending === %d\n", cpu_id);
+		return 0;
+	}
+
+	/*
+	 * Restore the registers of the different modes in the secure world.
+	 * When the CPU powers up it is in secure SVC mode after ROM init,
+	 * so the monitor registers must be restored first.
+	 */
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	p->num_lpi_cpus--;
+
+	/* Set entry for back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
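+	/*
+	 * The pen lock serializes the resume path: the first core to come
+	 * back re-initializes the platform and the GIC as primary, the
+	 * other core only performs the per-CPU (secondary) GIC init.
+	 */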
+	if (gic_inited == 0) {
+		/*
+		 * Call the Wakeup Late function to restore some
+		 * HW configuration (e.g. TZASC)
+		 */
+		plat_cpu_wakeup_late();
+
+		boot_primary_init_intc();
+		gic_inited = 1;
+		DMSG("=== Back from Suspended ===\n");
+	} else {
+		boot_secondary_init_intc();
+		gic_inited = 0;
+	}
+
+	imx_pen_unlock(cpu_id);
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sl.S b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sl.S
new file mode 100644
index 000000000..ea4cfa8cc
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sl.S
@@ -0,0 +1,757 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <platform_config.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_def.h>
+
+#define MX6_MMDC_MAPSR		0x404
+#define MX6_MMDC_MPDGCTRL0		0x83c
+
+	.section .text.psci.cpuidle
+
+	.macro  pll_do_wait_lock
+1:
+	ldr	r7, [r10, r8]
+	ands	r7, #0x80000000
+	beq	1b
+
+	.endm
+
+	.macro  ccm_do_wait
+2:
+	ldr	r7, [r10, #0x48]
+	cmp	r7, #0x0
+	bne	2b
+
+	.endm
+
+	.macro  ccm_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	/*
+	 * if in audio_bus_freq_mode, skip to
+	 * audio_mode low power setting.
+	 */
+	cmp 	r1, #0x1
+	beq	audio_mode
+	/*
+	 * Now set DDR rate to 1MHz.
+	 * DDR is from bypassed PLL2 on periph2_clk2 path.
+	 * Set the periph2_clk2_podf to divide by 8.
+	 */
+	ldr	r6, [r10, #0x14]
+	orr	r6, r6, #0x07
+	str	r6, [r10, #0x14]
+
+	/* Now set MMDC PODF to divide by 3. */
+	ldr	r6, [r10, #0x14]
+	bic	r6, r6,	#0x38
+	orr	r6, r6, #0x10
+	str	r6, [r10, #0x14]
+
+	ccm_do_wait
+
+	/* Set the AHB to 3MHz. AXI to 3MHz. */
+	ldr	r6, [r10, #0x14]
+	/* r12 stores the original AHB podf value */
+	mov	r12, r6
+	orr	r6, r6, #0x1c00
+	orr	r6, r6, #0x70000
+	str	r6, [r10, #0x14]
+
+	ccm_do_wait
+
+	/*
+	 * Now set ARM to 24MHz: move ARM to be sourced from step_clk
+	 * after setting step_clk to 24MHz.
+	 */
+	ldr	r6, [r10, #0x0c]
+	bic	r6, r6, #0x100
+	str	r6, [r10, #0xc]
+	/* Now switch pll1_sw_clk to step_clk */
+	ldr	r6, [r10, #0x0c]
+	orr	r6, r6, #0x4
+	str	r6, [r10, #0x0c]
+
+	/* Bypass PLL1 and power it down */
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldr	r6, =(1 << 16)
+	orr	r6, r6, #0x1000
+	str	r6, [r10, #0x04]
+
+	/*
+	 * Set the ARM PODF to divide by 8.
+	 * IPG is at 1.5MHz here, we need ARM to
+	 * run at the 12:5 ratio (WAIT mode issue).
+	 */
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r11, [r10, #0x10]
+	ldr	r6, =0x07
+	str	r6, [r10, #0x10]
+
+	ccm_do_wait
+
+	b	ccm_idle_done
+
+audio_mode:
+	/*
+	 * MMDC is sourced from pll2_200M.
+	 * Set the mmdc_podf to div by 8
+	 */
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r6, [r10, #0x14]
+	orr	r6, r6, #0x38
+	str	r6, [r10, #0x14]
+
+	ccm_do_wait
+
+	/*
+	 * ARM is sourced from pll2_pfd2_400M here.
+	 * switch ARM to bypassed PLL1
+	 */
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r6, [r10, #0x0c]
+	bic	r6, r6, #0x4
+	str	r6, [r10, #0xc]
+
+	/*
+	 * set the arm_podf to divide by 3
+	 * as IPG is at 4MHz, we cannot run
+	 * arm clk above 9.6MHz when system
+	 * enter WAIT mode
+	 */
+	ldr	r11, [r10, #0x10]
+	ldr	r6, =0x2
+	str	r6, [r10, #0x10]
+
+	ccm_do_wait
+
+ccm_idle_done:
+
+	.endm
+
+	.macro  ccm_exit_idle
+
+	/*
+	 * If in audio_bus_freq_mode, skip to
+	 * audio_mode ccm restore.
+	 */
+	cmp	r1, #0x1
+	beq	audio_ccm_restore
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	/* Power up PLL1 and un-bypass it. */
+	ldr	r6, =(1 << 12)
+	str	r6, [r10, #0x08]
+
+	/* Wait for PLL1 to relock */
+	ldr	r8, =0x0
+	pll_do_wait_lock
+
+	ldr	r6, =(1 << 16)
+	str	r6, [r10, #0x08]
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	/* Set PLL1_sw_clk back to PLL1 */
+	ldr	r6, [r10, #0x0c]
+	bic	r6, r6, #0x4
+	str	r6, [r10, #0x0c]
+
+	/* Restore AHB/AXI back */
+	str	r12, [r10, #0x14]
+
+	ccm_do_wait
+
+	/* restore mmdc back to 24MHz */
+	ldr	r6, [r10, #0x14]
+	bic	r6, r6, #0x3f
+	str	r6, [r10, #0x14]
+
+	ccm_do_wait
+	b	ccm_exit_done
+
+audio_ccm_restore:
+	/* move arm clk back to pll2_pfd2_400M */
+	ldr	r6, [r10, #0xc]
+	orr	r6, r6, #0x4
+	str	r6, [r10, #0xc]
+
+	/* restore mmdc podf */
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r6, [r10, #0x14]
+	bic	r6, r6, #0x38
+	orr	r6, #0x8
+	str	r6, [r10, #0x14]
+
+	ccm_do_wait
+
+ccm_exit_done:
+
+	.endm
+
+	.macro check_pll_state
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	/*
+	 * Check whether any PLL is enabled: only when no PLL is enabled
+	 * can the regular 2p5 be turned off and the weak one enabled.
+	 * PLL1 will be powered down later, so there is no need to check
+	 * its state.
+	 */
+
+	/* sys PLL2 */
+	ldr	r6, [r10, #0x30]
+	ands r6, r6, #(1 << 31)
+	bne	1f
+
+	/* usb PLL3 */
+	ldr	r6, [r10, #0x10]
+	ands	r6, r6, #(1 << 31)
+	bne	1f
+
+	/* audio PLL4 */
+	ldr	r6, [r10, #0x70]
+	ands	r6, r6, #(1 << 31)
+	bne	1f
+
+	/* video PLL5 */
+	ldr	r6, [r10, #0xa0]
+	ands	r6, r6, #(1 << 31)
+	bne	1f
+
+	/* enet PLL6 */
+	ldr	r6, [r10, #0xe0]
+	ands	r6, r6, #(1 << 31)
+	bne	1f
+
+	/* usb host PLL7 */
+	ldr	r6, [r10, #0x20]
+	ands	r6, r6, #(1 << 31)
+	bne	1f
+
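+	/* r4 = 1: no PLL is active, r4 = 0: at least one PLL is still on */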
+	ldr	r4, =0x1
+	b	check_done
+1:
+	ldr	r4, =0x0
+
+check_done:
+	.endm
+
+	.macro  anatop_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	cmp	r4, #0x0
+	beq	anatop_enter_done
+
+	/* Disable 1p1 brown out. */
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldr	r6, [r10, #0x110]
+	bic	r6, r6, #0x2
+	str	r6, [r10, #0x110]
+	/*
+	 * Set the OSC bias current to -37.5%
+	 * to drop the power on VDDHIGH.
+	 */
+	ldr	r6, [r10, #0x150]
+	orr	r6, r6, #0xc000
+	str	r6, [r10, #0x150]
+
+	/*
+	 * If the USB VBUS wakeup is enabled, skip
+	 * disabling the main 2p5.
+	 */
+	cmp	r2, #0x1
+	beq	anatop_enter_done
+
+	/* Enable the weak 2p5 */
+	ldr	r6, [r10, #0x130]
+	orr	r6, r6, #0x40000
+	str	r6, [r10, #0x130]
+
+	/* Disable main 2p5. */
+	ldr	r6, [r10, #0x130]
+	bic	r6, r6, #0x1
+	str	r6, [r10, #0x130]
+
+	/*
+	 * Cannot disable the regular bandgap
+	 * in LDO-enabled mode. The bandgap
+	 * is required for the ARM LDO to regulate
+	 * the voltage.
+	 */
+	ldr	r6, [r10, #0x140]
+	and	r6, r6, #0x1f
+	cmp	r6, #0x1f
+	bne	anatop_enter_done
+
+	/* Enable low power bandgap */
+	ldr	r6, [r10, #0x260]
+	orr	r6, r6, #0x20
+	str	r6, [r10, #0x260]
+
+	/*
+	 * Turn off the bias current
+	 * from the regular bandgap.
+	 */
+	ldr	r6, [r10, #0x260]
+	orr	r6, r6, #0x80
+	str	r6, [r10, #0x260]
+
+	/*
+	 * Clear REFTOP_SELFBIASOFF, the
+	 * self-bias circuit of the band gap.
+	 * Per RM, should be cleared when
+	 * band gap is powered down.
+	 */
+	ldr	r6, [r10, #0x150]
+	bic	r6, r6, #0x8
+	str	r6, [r10, #0x150]
+
+	/* Power down the regular bandgap */
+	ldr	r6, [r10, #0x150]
+	orr	r6, r6, #0x1
+	str	r6, [r10, #0x150]
+anatop_enter_done:
+
+	.endm
+
+	.macro  anatop_exit_idle
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	cmp	r4, #0x0
+	beq	skip_anatop_restore
+
+	cmp	r2, #0x1
+	beq	ldo2p5_not_disabled
+	/*
+	 * Regular bandgap will not be disabled
+	 * in LDO-enabled mode as it is required
+	 * for ARM-LDO to regulate the voltage.
+	 */
+	ldr	r6, [r10, #0x140]
+	and	r6, r6, #0x1f
+	cmp	r6, #0x1f
+	bne	skip_bandgap_restore
+
+	/* Power up the regular bandgap */
+	ldr	r6, [r10, #0x150]
+	bic	r6, r6, #0x1
+	str	r6, [r10, #0x150]
+
+	/* wait for bandgap stable */
+3:
+	ldr	r6, [r10, #0x150]
+	and	r6, r6, #0x80
+	cmp	r6, #0x80
+	bne	3b
+
+	/* now disable bandgap self-bias circuit */
+	ldr	r6, [r10, #0x150]
+	orr	r6, r6, #0x8
+	str	r6, [r10, #0x150]
+
+	/*
+	 * Turn on the bias current
+	 * from the regular bandgap.
+	 */
+	ldr	r6, [r10, #0x260]
+	bic	r6, r6, #0x80
+	str	r6, [r10, #0x260]
+
+	/* Disable the low power bandgap */
+	ldr	r6, [r10, #0x260]
+	bic	r6, r6, #0x20
+	str	r6, [r10, #0x260]
+
+skip_bandgap_restore:
+	/* Enable main 2p5. */
+	ldr	r6, [r10, #0x130]
+	orr	r6, r6,	#0x1
+	str	r6, [r10, #0x130]
+
+	/* Ensure the 2p5 is up */
+5:
+	ldr	r6, [r10, #0x130]
+	and	r6, r6, #0x20000
+	cmp	r6, #0x20000
+	bne	5b
+
+	/* Disable the weak 2p5 */
+	ldr	r6, [r10, #0x130]
+	bic	r6, r6, #0x40000
+	str	r6, [r10, #0x130]
+
+ldo2p5_not_disabled:
+	/*
+	 * Set the OSC bias current to max
+	 * value for normal operation.
+	 */
+	ldr	r6, [r10, #0x150]
+	bic	r6, r6, #0xc000
+	str	r6, [r10, #0x150]
+
+	/* Enable 1p1 brown out. */
+	ldr	r6, [r10, #0x110]
+	orr	r6, r6, #0x2
+	str	r6, [r10, #0x110]
+
+skip_anatop_restore:
+
+	.endm
+
+	.macro  disable_l1_dcache
+
+	/* disable d-cache */
+	mrc	p15, 0, r7, c1, c0, 0
+	bic	r7, r7, #(1 << 2)
+	mcr	p15, 0, r7, c1, c0, 0
+
+	dsb
+	isb
+
+	.endm
+
+	.macro  mmdc_enter_dvfs_mode
+
+	/* disable automatic power saving. */
+	ldr	r7, [r10, #MX6_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #MX6_MMDC_MAPSR]
+
+	/* disable power down timer */
+	ldr	r7, [r10, #0x04]
+	bic	r7, r7, #0xff00
+	str	r7, [r10, #0x04]
+
+	/* Make the DDR explicitly enter self-refresh. */
+	ldr	r7, [r10, #MX6_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6_MMDC_MAPSR]
+
+poll_dvfs_set:
+	ldr	r7, [r10, #MX6_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	beq	poll_dvfs_set
+
+	/* set SBS step-by step mode */
+	ldr	r7, [r10, #0x410]
+	orr	r7, r7, #0x100
+	str	r7, [r10, #0x410]
+
+	.endm
+
+	.macro  resume_mmdc
+	/* restore MMDC IO */
+	ldr	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
+6:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	str	r9, [r10, r8]
+	add	r7, r7, #0x4
+	subs	r6, r6, #0x1
+	bne	6b
+
+	/*
+	 * Need to reset the FIFO to avoid an MMDC lockup
+	 * caused by floating/changing the configuration
+	 * of many DDR IO pads.
+	 */
+	ldr	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	/* reset read FIFO, RST_RD_FIFO */
+	ldr	r7, =MX6_MMDC_MPDGCTRL0
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+7:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	7b
+
+	/* reset FIFO a second time */
+	ldr	r7, =MX6_MMDC_MPDGCTRL0
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+8:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	8b
+
+	ldr	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	/* Let DDR out of self-refresh */
+	ldr	r7, [r10, #MX6_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6_MMDC_MAPSR]
+9:
+	ldr	r7, [r10, #MX6_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	bne	9b
+
+	/* enable power down timer */
+	ldr	r7, [r10, #0x04]
+	orr	r7, r7, #0x5500
+	str	r7, [r10, #0x04]
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r10, #MX6_MMDC_MAPSR]
+	bic	r7, r7,	#0x1
+	str	r7, [r10, #MX6_MMDC_MAPSR]
+
+	/* Clear SBS - unblock DDR accesses */
+	ldr	r7, [r10, #0x410]
+	bic	r7, r7, #0x100
+	str	r7, [r10, #0x410]
+
+	.endm
+
+	.macro	tlb_set_to_ocram
+
+	/* save ttbr */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_TTBR1_OFF]
+
+	read_ttbr0 r7
+	str	r7, [r0, #PM_INFO_TTBR0_OFF]
+
+	/*
+	 * To ensure no page table walks occur in DDR, we
+	 * have a another page table stored in IRAM that only
+	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
+	 * We need to set the TTBR1 to the new IRAM TLB.
+	 * Do the following steps:
+	 * 1. Flush the Branch Target Address Cache (BTAC)
+	 * 2. Set TTBR1 to point to IRAM page table.
+	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
+	 * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0
+	 *     and 2-4G is translated by TTBR1.
+	 */
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r7, [r6]
+
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1 */
+	write_ttbr1 r7
+	write_ttbr0 r7
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+	.endm
+
+	.macro	tlb_back_to_ddr
+
+	/* Restore the TTBCR */
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	dsb
+	isb
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* restore ttbr */
+	ldr	r6, [r0, #PM_INFO_TTBR1_OFF]
+	write_ttbr1 r6
+	ldr	r6, [r0, #PM_INFO_TTBR0_OFF]
+	write_ttbr0 r6
+	isb
+
+	.endm
+
+.extern iram_tlb_phys_addr
+
+/*
+ * imx6sl_low_power_idle code
+ * r0: wfi code base address
+ * r1: audio_bus_freq mode state
+ * r2: vbus_ldo status
+ * r4: used to store the PLLs state
+ * r11: used to save the original ARM_PODF value
+ * r12: used to save the original AHB_PODF value
+ */
+	.align 3
+FUNC imx6sl_low_power_idle, :
+	push {r4-r12}
+
+	tlb_set_to_ocram
+	disable_l1_dcache
+
+#ifdef CFG_PL310
+	/* sync L2 */
+	ldr	r10, [r0, #PM_INFO_PL310_V_OFF]
+	/* Wait for background operations to complete. */
+wait_for_l2_idle:
+	ldr	r6, [r10, #0x730]
+	cmp	r6, #0x0
+	bne	wait_for_l2_idle
+
+	mov	r6, #0x0
+	str	r6, [r10, #0x730]
+	/* disable L2 */
+	str	r6, [r10, #0x100]
+
+	dsb
+	isb
+#endif
+
+	/* parse the ldo and audio mode state */
+	ldr	r10, [r0, #PM_INFO_IDLE_STATE]
+	and	r2, r10, #(1 << 1) /* ldo status */
+	lsr	r2, r2, #0x1
+	and	r1, r10, #(1 << 2) /* audio mode status */
+	lsr	r1, r1, #0x2
+
+	/* make sure MMDC in self-refresh */
+	ldr	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	mmdc_enter_dvfs_mode
+	/* save DDR IO settings and set them to LPM mode */
+	ldr	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldr	r6, =0x0
+	ldr	r7, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r8, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r8, r8, r0
+
+save_and_set_mmdc_io_lpm:
+	ldr	r9, [r8], #0x4
+	ldr	r5, [r10, r9]
+	str	r6, [r10, r9]
+	str	r5, [r8], #0x4
+	add	r8, r8, #0x4
+	subs	r7, r7, #0x1
+	bne	save_and_set_mmdc_io_lpm
+
+	/* check the PLLs lock state */
+	check_pll_state
+
+	ccm_enter_idle
+	/*
+	 * If in audio low power mode, there is no
+	 * need to change the anatop settings.
+	 */
+	cmp	r1, #0x1
+	beq	do_wfi
+	anatop_enter_idle
+do_wfi:
+	wfi
+	/*
+	 * Add these nops so that the
+	 * prefetcher will not try to get
+	 * any instructions from DDR.
+	 * The prefetch depth is about 23
+	 * on the A9, so add 25 nops.
+	 */
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/*
+	 * restore the ARM PODF first to speed
+	 * up the restore procedure
+	 */
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	/* Restore arm_clk_podf */
+	str	r11, [r10, #0x10]
+	ccm_do_wait
+
+	/*
+	 * If in audio low power mode, skip
+	 * restoring the anatop settings.
+	 */
+	cmp	r1, #0x1
+	beq 	skip_analog_restore
+	anatop_exit_idle
+
+skip_analog_restore:
+	ccm_exit_idle
+	resume_mmdc
+
+	/* enable d-cache */
+	mrc	p15, 0, r7, c1, c0, 0
+	orr	r7, r7, #(1 << 2)
+	mcr	p15, 0, r7, c1, c0, 0
+
+#ifdef CFG_PL310
+	ldr	r10, [r0, #PM_INFO_PL310_V_OFF]
+	mov	r7, #0x1
+	/* enable L2 */
+	str	r7, [r10, #0x100]
+#endif
+	tlb_back_to_ddr
+
+	/* Restore register */
+	pop	{r4 - r12}
+	mov	pc, lr
+
+	/*
+	 * Add ltorg here to ensure that all
+	 * literals are stored here and are
+	 * within the text space.
+	 */
+	.ltorg
+mx6sl_lpm_wfi_end:
+END_FUNC imx6sl_low_power_idle
diff --git a/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sll.S b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sll.S
new file mode 100644
index 000000000..1d45980c5
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sll.S
@@ -0,0 +1,773 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <platform_config.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_def.h>
+
+	.section .text.psci.cpuidle
+
+	.macro  sync_l2_cache
+
+	/* sync L2 cache to drain L2's buffers to DRAM. */
+#ifdef CFG_PL310
+	ldr	r10, [r0, #PM_INFO_PL310_V_OFF]
+	mov	r7, #0x0
+	str	r7, [r10, #0x730]
+1:
+	ldr	r7, [r10, #0x730]
+	ands	r7, r7, #0x1
+	bne	1b
+
+#endif
+
+	.endm
+
+	.macro	pll_do_wait_lock
+1:
+	ldr	r7, [r10, r8]
+	ands	r7, #0x80000000
+	beq	1b
+
+	.endm
+
+	.macro	ccm_do_wait
+2:
+	ldr	r7, [r10, #0x48]
+	cmp	r7, #0x0
+	bne	2b
+
+	.endm
+
+	.macro ccm_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+
+	/* set ahb to 3MHz */
+	ldr	r7, [r10, #0x14]
+	orr	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	/* set perclk to 6MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	orr	r7, r7, #0x3
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc to 1MHz; periph2_clk2 needs to be at 8MHz */
+	ldr	r7, [r10, #0x14]
+	orr     r7, r7, #0x2
+	orr	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/*
+	/*
+	 * Disable PLL2: in low power idle mode only the 396MHz PFD
+	 * needs PLL2, and since the ARM clock has been switched to
+	 * the OSC, PLL2 can now be disabled. Gate pll2_pfd2 first.
+	 */
+	ldr	r7, [r10, #0x100]
+	orr	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	ldr	r7, [r10, #0x30]
+	orr	r7, r7, #0x1000
+	bic	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	.endm
+
+	.macro	ccm_exit_idle
+
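+	/*
+	 * r5 == 0: MMU is on, use the virtual register bases;
+	 * r5 != 0: resume path with the MMU off, use the physical bases.
+	 */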
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* enable pll2 and pll2_pfd2 */
+	ldr	r7, [r10, #0x30]
+	bic	r7, r7, #0x1000
+	orr	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	ldr	r8, =0x30
+	pll_do_wait_lock
+
+	ldr	r7, [r10, #0x100]
+	bic	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_CCM_P_OFF]
+
+	/* set perclk back to 24MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x7
+	bic	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	/* set ahb div back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	.endm
+
+	.macro	anatop_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/*
+	 * Check whether any PLL is enabled: only when no PLL is enabled
+	 * can the regular 2P5 and 1P1 be turned off and the weak ones
+	 * enabled.
+	 */
+
+	/* arm pll1 */
+	ldr	r7, [r10, #0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* sys pll2 */
+	ldr	r7, [r10, #0x30]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb pll3 */
+	ldr	r7, [r10, #0x10]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* audio pll4 */
+	ldr	r7, [r10, #0x70]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* video pll5 */
+	ldr	r7, [r10, #0xa0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enet pll6 */
+	ldr	r7, [r10, #0xe0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb host pll7 */
+	ldr	r7, [r10, #0x20]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enable weak 2P5 and turn off regular 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* enable weak 1p1 and turn off regular 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+
+	/* low power band gap enable */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+
+	/* turn off the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/*
+	 * clear the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 * Per RM, should be cleared when
+	 * band gap is powered down.
+	 */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x8
+	str	r7, [r10, #0x150]
+
+	/* turn off regular bandgap */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+
+10:
+	/* switch to RC-OSC */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x40000000
+	str	r7, [r10, #0x150]
+
+	/* lower OSC current by 37.5% */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	/* disconnect vdd_high_in and vdd_snvs_in */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x1000
+	str	r7, [r10, #0x150]
+
+	.endm
+
+	.macro anatop_exit_idle
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* increase OSC current to normal */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	/* turn on XTAL-OSC and detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x40000000
+	orr	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+
+	/* wait for XTAL stable */
+14:
+	ldr	r7, [r10, #0x150]
+	ands	r7, r7, #0x8000
+	beq	14b
+
+	/* switch to XTAL-OSC */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+15:
+	/* check whether we need to enable 2P5/1P1 */
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x40000
+	beq	11f
+
+	/* turn on regular bandgap and wait for stable */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+13:
+	ldr	r7, [r10, #0x150]
+	ands	r7, #0x80
+	beq	13b
+
+	/*
+	 * set the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 */
+	ldr     r7, [r10, #0x150]
+	orr     r7, r7, #0x8
+	str     r7, [r10, #0x150]
+
+	/* turn on the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/* low power band gap disable */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+12:
+	/* enable regular 2P5 and turn off weak 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* Ensure the 2P5 is up. */
+3:
+	ldr	r7, [r10, #0x130]
+	ands	r7, r7, #0x20000
+	beq	3b
+	ldr	r7, [r10, #0x130]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+
+	/* enable regular 1p1 and turn off weak 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+4:
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x20000
+	beq	4b
+	ldr	r7, [r10, #0x110]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+11:
+	.endm
+
+	.macro	disable_l1_dcache
+
+	/*
+	 * Flush all data from the L1 data cache before disabling
+	 * SCTLR.C bit.
+	 */
+	push	{r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r7, =dcache_op_all
+	mov	lr, pc
+	mov	pc, r7
+	pop	{r0 - r10, lr}
+
+	/* disable d-cache */
+	read_sctlr r7
+	bic	r7, r7, #0x4
+	write_sctlr r7
+	dsb
+	isb
+
+	push	{r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r7, =dcache_op_all
+	mov	lr, pc
+	mov	pc, r7
+	pop	{r0 - r10, lr}
+
+	.endm
+
+	.macro mmdc_enter_dvfs_mode
+
+	/* disable automatic power savings. */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+
+	/* make the DDR explicitly enter self-refresh. */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+5:
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	beq	5b
+
+	.endm
+
+	.macro	resume_mmdc
+
+	/* restore MMDC IO */
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_IOMUXC_P_OFF]
+
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
+6:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	add	r7, r7, #0x4
+	str	r9, [r10, r8]
+	subs	r6, r6, #0x1
+	bne	6b
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_MMDC0_P_OFF]
+
+	/* reset read FIFO, RST_RD_FIFO */
+	ldr	r7, =MX6Q_MMDC_MPDGCTRL0
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+7:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	7b
+
+	/* reset FIFO a second time */
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+8:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	8b
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+9:
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	bne	9b
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+
+	.endm
+
+	.macro	tlb_set_to_ocram
+
+	/* save ttbr */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_TTBR1_OFF]
+
+	read_ttbr0 r7
+	str	r7, [r0, #PM_INFO_TTBR0_OFF]
+
+	/*
+	 * To ensure no page table walks occur in DDR, we
+	 * have a another page table stored in IRAM that only
+	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
+	 * We need to set the TTBR1 to the new IRAM TLB.
+	 * Do the following steps:
+	 * 1. Flush the Branch Target Address Cache (BTAC)
+	 * 2. Set TTBR1 to point to IRAM page table.
+	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
+	 * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0
+	 *     and 2-4G is translated by TTBR1.
+	 */
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r7, [r6]
+
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1 */
+	write_ttbr1 r7
+	write_ttbr0 r7
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+	.endm
+
+	.macro	tlb_back_to_ddr
+
+	/* Restore the TTBCR */
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	dsb
+	isb
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* restore ttbr */
+	ldr	r6, [r0, #PM_INFO_TTBR1_OFF]
+	write_ttbr1 r6
+	ldr	r6, [r0, #PM_INFO_TTBR0_OFF]
+	write_ttbr0 r6
+	isb
+
+	.endm
+
+.extern iram_tlb_phys_addr
+
+/* imx6sll_low_power_idle */
+
+	.align 3
+FUNC imx6sll_low_power_idle, :
+	push	{r4 - r10}
+
+	/* get necessary info from pm_info */
+	ldr	r1, [r0, #PM_INFO_PBASE_OFF]
+	ldr	r2, [r0, #PM_INFO_INFO_SIZE_OFF]
+
+	/*
+	 * Compute the physical resume address (the wakeup label) inside
+	 * the IRAM copy so it can be programmed into the SRC register.
+	 */
+	ldr	r5, =imx6sll_low_power_idle
+	ldr     r6, =wakeup
+	sub	r6, r6, r5
+	add     r8, r1, r2
+	add	r3, r8, r6
+
+	/* store physical resume addr and pm_info address. */
+	ldr	r10, [r0, #PM_INFO_SRC_V_OFF]
+	str	r3, [r10, #0x20]
+	str	r1, [r10, #0x24]
+
+	/* save the diagnostic register */
+	mrc	p15, 0, r7, c15, c0, 1
+	str	r7, [r0, #PM_INFO_SAVED_DIAGNOSTIC_OFF]
+
+	/* set ARM power to be gated */
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x1
+	str	r7, [r10, #0x2a0]
+
+	disable_l1_dcache
+
+#ifdef CFG_PL310
+	/* sync L2 */
+	ldr	r10, [r0, #PM_INFO_PL310_V_OFF]
+
+	/* Wait for background operations to complete. */
+wait_for_l2_to_idle:
+	ldr	r7, [r10, #0x730]
+	cmp	r7, #0x0
+	bne	wait_for_l2_to_idle
+
+	mov	r7, #0x0
+	str	r7, [r10, #0x730]
+	/* disable L2 */
+	str	r7, [r10, #0x100]
+
+	dsb
+	isb
+#endif
+
+	tlb_set_to_ocram
+
+	/* make sure MMDC in self-refresh */
+	ldr	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	mmdc_enter_dvfs_mode
+
+	/* save DDR IO settings */
+	ldr     r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldr     r6, =0x0
+	ldr     r7, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr     r8, =PM_INFO_MMDC_IO_VAL_OFF
+	add     r8, r8, r0
+save_and_set_mmdc_io_lpm:
+	ldr	r9, [r8], #0x4
+	ldr	r5, [r10, r9]
+	str	r6, [r10, r9]
+	str	r5, [r8], #0x4
+	/*
+	 * We reuse the suspend/resume structure, where
+	 * each entry is 12 bytes.
+	 */
+	add	r8, r8, #0x4
+	subs	r7, r7, #0x1
+	bne	save_and_set_mmdc_io_lpm
+
+	mov	r5, #0x0
+	ccm_enter_idle
+	anatop_enter_idle
+
+	/*
+	 * mask all GPC interrupts before
+	 * enabling the RBC counters to
+	 * avoid the counter starting too
+	 * early if an interrupt is already
+	 * pending.
+	 */
+	ldr     r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r4, [r10, #MX6Q_GPC_IMR1]
+	ldr	r5, [r10, #MX6Q_GPC_IMR2]
+	ldr	r6, [r10, #MX6Q_GPC_IMR3]
+	ldr	r7, [r10, #MX6Q_GPC_IMR4]
+
+	ldr	r3, =0xffffffff
+	str	r3, [r10, #MX6Q_GPC_IMR1]
+	str	r3, [r10, #MX6Q_GPC_IMR2]
+	str	r3, [r10, #MX6Q_GPC_IMR3]
+	str	r3, [r10, #MX6Q_GPC_IMR4]
+
+	/*
+	 * enable the RBC bypass counter here
+	 * to hold off the interrupts. RBC counter
+	 * = 4 (120us). With this setting, the latency
+	 * from wakeup interrupt to ARM power up
+	 * is ~130uS.
+	 */
+	ldr     r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r3, [r10, #MX6Q_CCM_CCR]
+	bic	r3, r3, #(0x3f << 21)
+	orr	r3, r3, #(0x4 << 21)
+	str	r3, [r10, #MX6Q_CCM_CCR]
+
+	/* enable the counter. */
+	ldr	r3, [r10, #MX6Q_CCM_CCR]
+	orr	r3, r3, #(0x1 << 27)
+	str	r3, [r10, #MX6Q_CCM_CCR]
+
+	/* unmask all the GPC interrupts. */
+	ldr     r10, [r0, #PM_INFO_GPC_V_OFF]
+	str	r4, [r10, #MX6Q_GPC_IMR1]
+	str	r5, [r10, #MX6Q_GPC_IMR2]
+	str	r6, [r10, #MX6Q_GPC_IMR3]
+	str	r7, [r10, #MX6Q_GPC_IMR4]
+
+	/*
+	 * now delay for a short while (3usec)
+	 * ARM is at 24MHz at this point
+	 * so a short loop should be enough.
+	 * this delay is required to ensure that
+	 * the RBC counter can start counting in
+	 * case an interrupt is already pending
+	 * or in case an interrupt arrives just
+	 * as ARM is about to assert DSM_request.
+	 */
+	ldr	r4, =50
+rbc_loop:
+	subs	r4, r4, #0x1
+	bne	rbc_loop
+
+	wfi
+
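+	/*
+	 * NOPs keep the Cortex-A9 prefetcher from fetching instructions
+	 * from DDR right after WFI (same rationale as the 25 nops in the
+	 * i.MX6SL idle path).
+	 */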
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	mov	r5, #0x0
+	anatop_exit_idle
+	ccm_exit_idle
+	resume_mmdc
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
+	/* enable d-cache */
+	mrc	p15, 0, r7, c1, c0, 0
+	orr	r7, r7, #(1 << 2)
+	mcr	p15, 0, r7, c1, c0, 0
+
+#ifdef CFG_PL310
+	ldr	r10, [r0, #PM_INFO_PL310_V_OFF]
+	mov	r7, #0x1
+	/* enable L2 */
+	str	r7, [r10, #0x100]
+#endif
+
+	tlb_back_to_ddr
+
+	/* Restore registers */
+	pop	{r4 - r10}
+	mov	pc, lr
+
+wakeup:
+	/* switch monitor mode */
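+	/* CPSR.M = 0x16 (Monitor), with IRQ (bit 7) and FIQ (bit 6) masked */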
+	mov	r3, #0x16
+	mov	r4, #((1 << 6) | (1 << 7))
+	orr	r3, r3, r4
+	msr	cpsr, r3
+	nop
+	nop
+	nop
+
+	/* Do not use arm_cl1_i_inv_all, we are running from OCRAM now */
+	/* Invalidate Entire Instruction Cache */
+	write_icialluis
+	dsb
+	/* Flush entire branch target cache */
+	write_bpiallis
+	dsb
+	isb
+
+	/* enable the Icache and branch prediction */
+	mov	r1, #0x1800
+	mcr	p15, 0, r1, c1, c0, 0
+	isb
+
+	/* restore diagnostic register */
+	ldr	r7, [r0, #PM_INFO_SAVED_DIAGNOSTIC_OFF]
+	mcr	p15, 0, r7, c15, c0, 1
+
+	/* get physical resume address from pm_info. */
+	ldr	lr, [r0, #PM_INFO_TEE_RESUME_OFF]
+	/* clear core0's entry and parameter */
+	ldr	r10, [r0, #PM_INFO_SRC_P_OFF]
+	mov	r7, #0x0
+	str	r7, [r10, #MX6Q_SRC_GPR1]
+	str	r7, [r10, #MX6Q_SRC_GPR2]
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_P_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
+	mov	r5, #0x1
+	anatop_exit_idle
+	ccm_exit_idle
+	resume_mmdc
+
+	/* Restore registers */
+	mov	pc, lr
+	.ltorg
+mx6sll_lpm_wfi_end:
+END_FUNC imx6sll_low_power_idle
diff --git a/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sx.S b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sx.S
new file mode 100644
index 000000000..0c793a7f7
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6sx.S
@@ -0,0 +1,924 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <platform_config.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_def.h>
+
+#define	MX6SX_MMDC_MAPSR	0x404
+#define MX6SX_MMDC_MPDGCTRL0	0x83c
+#define MX6SX_SRC_GPR1		0x20
+#define MX6SX_SRC_GPR2		0x24
+#define MX6SX_GPC_IMR1		0x08
+#define MX6SX_GPC_IMR2		0x0c
+#define MX6SX_GPC_IMR3		0x10
+#define MX6SX_GPC_IMR4		0x14
+#define MX6SX_CCM_CCR		0x0
+
+	.section .text.psci.cpuidle
+
+	.macro	pll_do_wait_lock
+1:
+	ldr	r7, [r10, r8]
+	ands	r7, #0x80000000
+	beq	1b
+
+	.endm
+
+	.macro	ccm_do_wait
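+	/* poll CCM_CDHIPR (0x48) until all divider handshakes are done */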
+2:
+	ldr	r7, [r10, #0x48]
+	cmp	r7, #0x0
+	bne	2b
+
+	.endm
+
+	.macro	ccm_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+
+	/* set ahb to 3MHz */
+	ldr	r7, [r10, #0x14]
+	orr	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	/* set perclk to 6MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	orr	r7, r7, #0x3
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc to 1MHz, periph2_clk2 needs to be @8MHz */
+	ldr	r7, [r10, #0x14]
+	orr     r7, r7, #0x2
+	orr	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+
+	/* set pll1_sw to pll1 main clk */
+	ldr	r7, [r10, #0xc]
+	bic	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	/* set step from osc */
+	ldr	r7, [r10, #0xc]
+	bic	r7, r7, #0x100
+	str	r7, [r10, #0xc]
+
+	/* set pll1_sw to step clk */
+	ldr	r7, [r10, #0xc]
+	orr	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/* Disable PLL1 bypass output */
+	ldr	r7, [r10]
+	bic	r7, r7, #0x12000
+	str	r7, [r10]
+
+	/*
+	 * Disable pll2: in low power idle mode only the 396MHz PFD
+	 * needs pll2, and the ARM clock now runs from the OSC, so
+	 * pll2 can be powered down. Gate pll2_pfd2 first.
+	 */
+	ldr	r7, [r10, #0x100]
+	orr	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	ldr	r7, [r10, #0x30]
+	orr	r7, r7, #0x1000
+	bic	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	.endm
+
+	.macro	ccm_exit_idle
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* enable pll2 and pll2_pfd2 */
+	ldr	r7, [r10, #0x30]
+	bic	r7, r7, #0x1000
+	orr	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	ldr	r8, =0x30
+	pll_do_wait_lock
+
+	ldr	r7, [r10, #0x100]
+	bic	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	/* enable PLL1 bypass output */
+	ldr	r7, [r10]
+	orr	r7, r7, #0x12000
+	str	r7, [r10]
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_CCM_P_OFF]
+
+	/* set perclk back to 24MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x7
+	bic	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	/* set ahb div back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	/* set pll1_sw to pll1 main clk */
+	ldr	r7, [r10, #0xc]
+	bic	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	/* set step from pll2_pfd2 */
+	ldr	r7, [r10, #0xc]
+	orr	r7, r7, #0x100
+	str	r7, [r10, #0xc]
+
+	/* set pll1_sw to step clk */
+	ldr	r7, [r10, #0xc]
+	orr	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	.endm
+
+	.macro	anatop_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/*
+	 * Check whether any PLL is enabled: the regular 2P5 and 1P1
+	 * regulators can only be turned off (keeping just the weak
+	 * ones) when no PLL is running.
+	 */
+
+	/* arm pll1 */
+	ldr	r7, [r10, #0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* sys pll2 */
+	ldr	r7, [r10, #0x30]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb pll3 */
+	ldr	r7, [r10, #0x10]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* audio pll4 */
+	ldr	r7, [r10, #0x70]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* video pll5 */
+	ldr	r7, [r10, #0xa0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enet pll6 */
+	ldr	r7, [r10, #0xe0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb host pll7 */
+	ldr	r7, [r10, #0x20]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enable weak 2P5 and turn off regular 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* enable weak 1p1 and turn off regular 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+
+	/* check whether ARM LDO is bypassed */
+	ldr	r7, [r10, #0x140]
+	and	r7, r7, #0x1f
+	cmp	r7, #0x1f
+	bne	10f
+
+	/* low power band gap enable */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+
+	/* turn off the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/*
+	 * clear the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 * Per RM, should be cleared when
+	 * band gap is powered down.
+	 */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x8
+	str	r7, [r10, #0x150]
+
+	/* turn off regular bandgap */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+
+	/* only switch to RC-OSC clk after TO1.2 */
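+	/* ANADIG_DIGPROG (0x260): minor silicon revision in bits [1:0] */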
+	ldr	r7, [r10, #0x260]
+	and	r7, r7, #0x3
+	cmp	r7, #0x2
+	blt	10f
+
+	/* switch to RC-OSC */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x40000000
+	str	r7, [r10, #0x150]
+10:
+	/* lower OSC current by 37.5% */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	.endm
+
+	.macro anatop_exit_idle
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* increase OSC current to normal */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	/* the switch to RC-OSC was only done on TO1.2 and later */
+	ldr	r7, [r10, #0x260]
+	and	r7, r7, #0x3
+	cmp	r7, #0x2
+	blt	15f
+
+	/* turn on XTAL-OSC and detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x40000000
+	orr	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+
+	/* wait for XTAL stable */
+14:
+	ldr	r7, [r10, #0x150]
+	ands	r7, r7, #0x8000
+	beq	14b
+
+	/* switch to XTAL-OSC */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+15:
+	/* check whether we need to enable 2P5/1P1 */
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x40000
+	beq	11f
+
+	/* check whether ARM LDO is bypassed */
+	ldr	r7, [r10, #0x140]
+	and	r7, r7, #0x1f
+	cmp	r7, #0x1f
+	bne	12f
+
+	/* turn on regular bandgap and wait for stable */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+13:
+	ldr	r7, [r10, #0x150]
+	ands	r7, #0x80
+	beq	13b
+
+	/*
+	 * set the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 */
+	ldr     r7, [r10, #0x150]
+	orr     r7, r7, #0x8
+	str     r7, [r10, #0x150]
+
+	/* turn on the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/* low power band gap disable */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+12:
+	/* enable regular 2P5 and turn off weak 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* Ensure the 2P5 is up. */
+3:
+	ldr	r7, [r10, #0x130]
+	ands	r7, r7, #0x20000
+	beq	3b
+	ldr	r7, [r10, #0x130]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+
+	/* enable regular 1p1 and turn off weak 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+4:
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x20000
+	beq	4b
+	ldr	r7, [r10, #0x110]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+11:
+	.endm
+
+	.macro	disable_l1_dcache
+
+	/*
+	 * Flush all data from the L1 data cache before disabling
+	 * SCTLR.C bit.
+	 */
+	push	{r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+	/* disable d-cache */
+	read_sctlr r7
+	bic	r7, r7, #0x4
+	write_sctlr r7
+	dsb
+	isb
+
+	push	{r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+	.endm
+
+	.macro mmdc_enter_dvfs_mode
+
+	/* disable automatic power savings. */
+	ldr	r7, [r10, #MX6SX_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #MX6SX_MMDC_MAPSR]
+
+	/* disable power down timer */
+	ldr	r7, [r10, #0x4]
+	bic	r7, r7, #0xff00
+	str	r7, [r10, #0x4]
+
+	/* make the DDR explicitly enter self-refresh. */
+	ldr	r7, [r10, #MX6SX_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6SX_MMDC_MAPSR]
+5:
+	ldr	r7, [r10, #MX6SX_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	beq	5b
+
+	.endm
+
+	.macro	resume_mmdc
+	/* restore MMDC IO */
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_IOMUXC_P_OFF]
+
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
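+	/*
+	 * Each saved entry is 12 bytes: IOMUX offset, saved value and
+	 * one spare word (the same layout is used by the suspend code).
+	 */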
+6:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x8
+	str	r9, [r10, r8]
+	subs	r6, r6, #0x1
+	bne	6b
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_MMDC0_P_OFF]
+
+	/* reset read FIFO, RST_RD_FIFO */
+	ldr	r7, =MX6SX_MMDC_MPDGCTRL0
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+7:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	7b
+
+	/* reset FIFO a second time */
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+8:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	8b
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r10, #MX6SX_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6SX_MMDC_MAPSR]
+9:
+	ldr	r7, [r10, #MX6SX_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	bne	9b
+
+	/* enable power down timer */
+	ldr	r7, [r10, #0x4]
+	orr	r7, r7, #0x5500
+	str	r7, [r10, #0x4]
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r10, #MX6SX_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #MX6SX_MMDC_MAPSR]
+
+	.endm
+
+	.macro  sema4_lock
+
+	/* lock the shared memory SEMA4 gate */
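+	/*
+	 * The SEMA4 gate arbitrates the shared ANATOP/CCM registers
+	 * with the other bus master (the Cortex-M4 core on i.MX6SX).
+	 */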
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_SEMA4_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_SEMA4_P_OFF]
+	ldrb	r6, =0x1
+16:
+	ldrb	r7, [r10, #0x6]
+	cmp	r7, #0x0
+	bne	16b
+	strb	r6, [r10, #0x6]
+
+	.endm
+
+	.macro  sema4_unlock
+
+	/* unlock the shared memory SEMA4 gate */
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_SEMA4_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_SEMA4_P_OFF]
+	ldrb	r6, =0x0
+	strb	r6, [r10, #0x6]
+
+	.endm
+
+	.macro	tlb_set_to_ocram
+
+	/* save ttbr */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_TTBR1_OFF]
+
+	/*
+	 * To ensure no page table walks occur in DDR, we
+	 * have another page table stored in IRAM that only
+	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
+	 * We need to set the TTBR1 to the new IRAM TLB.
+	 * Do the following steps:
+	 * 1. Flush the Branch Target Address Cache (BTAC)
+	 * 2. Set TTBR1 to point to IRAM page table.
+	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
+	 * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0
+	 *     and 2-4G is translated by TTBR1.
+	 */
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r7, [r6]
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #0x800
+	write_sctlr r6
+
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1/TTBR0 */
+	write_ttbr1 r7
+
+	/* Read TTBCR and set PD0=1 and PD1=0 */
+	/* Warning: unknown behaviour if LPAE is enabled */
+	read_ttbcr r6
+	bic	r6, r6, #0x30
+	orr	r6, r6, #0x10
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+	.endm
+
+	.macro	tlb_back_to_ddr
+
+	/* Restore the TTBCR */
+
+	dsb
+	isb
+
+	/* Read TTBCR and set PD0=0 and PD1=0 */
+	read_ttbcr r6
+	bic	r6, r6, #0x30
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	dsb
+	isb
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* restore ttbr */
+	ldr	r6, [r0, #PM_INFO_TTBR1_OFF]
+	write_ttbr1 r6
+
+	isb
+
+	.endm
+
+.extern iram_tlb_phys_addr
+
+	.align 3
+/**
+ * @brief   Prepare and switch the device to enter low power mode.
+ *          The function executes from OCRAM.
+ *          On success, the device is reset.
+ *          The operation can be cancelled; in that case the device is
+ *          not reset and execution returns to the caller.
+ *
+ *          The input parameter is a reference to an imx_pm_asm_arg
+ *          structure holding the function argument (refer to imx_pm.h)
+ *
+ * @param[in] r0  reference to the structure imx_pm_asm_arg in normal
+ *                memory.
+ */
+FUNC imx6sx_low_power_idle, :
+	push	{r4 - r10}
+
+	/* Get the function arguments data */
+	ldr	r1, [r0, #PM_ASM_ARG_PA_ADDR_OFF]
+	ldr	r0, [r0, #PM_ASM_ARG_PM_INFO_OFF]
+
+	/*
+	 * Calculate the Physical address of the wakeup function
+	 * to initialize the SRC register
+	 */
+	ldr		r4, =imx6sx_low_power_idle
+	ldr		r3, =wakeup
+	sub		r3, r4
+	add		r3, r1
+
+	ldr	r1, [r0, #PM_INFO_PBASE_OFF]
+
+	/* store physical resume addr and pm_info address. */
+	ldr	r10, [r0, #PM_INFO_SRC_V_OFF]
+	str	r3, [r10, #0x20]
+	str	r1, [r10, #0x24]
+
+	/* save diagnostic register */
+	mrc	p15, 0, r7, c15, c0, 1
+	str	r7, [r0, #PM_INFO_SAVED_DIAGNOSTIC_OFF]
+
+	/* set ARM power to be gated */
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x1
+	str	r7, [r10, #0x2a0]
+
+	disable_l1_dcache
+
+#ifdef CFG_PL310
+	/* sync L2 */
+	ldr	r10, [r0, #PM_INFO_PL310_V_OFF]
+
+	/* Wait for background operations to complete. */
+wait_for_l2_to_idle:
+	ldr	r7, [r10, #0x730]
+	cmp	r7, #0x0
+	bne	wait_for_l2_to_idle
+
+	mov	r7, #0x0
+	str	r7, [r10, #0x730]
+	/* disable L2 */
+	str	r7, [r10, #0x100]
+
+	dsb
+	isb
+#endif
+
+	tlb_set_to_ocram
+
+	/* make sure MMDC in self-refresh */
+	ldr	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	mmdc_enter_dvfs_mode
+
+	/* save DDR IO settings */
+	ldr     r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldr     r6, =0x0
+	ldr     r7, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr     r8, =PM_INFO_MMDC_IO_VAL_OFF
+	add     r8, r8, r0
+save_and_set_mmdc_io_lpm:
+	ldr	r9, [r8], #0x4
+	ldr	r5, [r10, r9]
+	str	r6, [r10, r9]
+	str	r5, [r8], #0x8
+	subs	r7, r7, #0x1
+	bne	save_and_set_mmdc_io_lpm
+
+	mov	r5, #0x0
+	sema4_lock
+	ccm_enter_idle
+	anatop_enter_idle
+	sema4_unlock
+
+	/*
+	 * mask all GPC interrupts before
+	 * enabling the RBC counters to
+	 * avoid the counter starting too
+	 * early if an interrupt is already
+	 * pending.
+	 */
+	ldr     r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r4, [r10, #MX6SX_GPC_IMR1]
+	ldr	r5, [r10, #MX6SX_GPC_IMR2]
+	ldr	r6, [r10, #MX6SX_GPC_IMR3]
+	ldr	r7, [r10, #MX6SX_GPC_IMR4]
+
+	ldr	r3, =0xffffffff
+	str	r3, [r10, #MX6SX_GPC_IMR1]
+	str	r3, [r10, #MX6SX_GPC_IMR2]
+	str	r3, [r10, #MX6SX_GPC_IMR3]
+	str	r3, [r10, #MX6SX_GPC_IMR4]
+
+	/*
+	 * enable the RBC bypass counter here
+	 * to hold off the interrupts. RBC counter
+	 * = 4 (120us). With this setting, the latency
+	 * from wakeup interrupt to ARM power up
+	 * is ~130us.
+	 */
+	ldr     r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r3, [r10, #MX6SX_CCM_CCR]
+	bic	r3, r3, #(0x3f << 21)
+	orr	r3, r3, #(0x4 << 21)
+	str	r3, [r10, #MX6SX_CCM_CCR]
+
+	/* enable the counter. */
+	ldr	r3, [r10, #MX6SX_CCM_CCR]
+	orr	r3, r3, #(0x1 << 27)
+	str	r3, [r10, #MX6SX_CCM_CCR]
+
+	/* unmask all the GPC interrupts. */
+	ldr     r10, [r0, #PM_INFO_GPC_V_OFF]
+	str	r4, [r10, #MX6SX_GPC_IMR1]
+	str	r5, [r10, #MX6SX_GPC_IMR2]
+	str	r6, [r10, #MX6SX_GPC_IMR3]
+	str	r7, [r10, #MX6SX_GPC_IMR4]
+
+	/*
+	 * now delay for a short while (3usec)
+	 * ARM is at 24MHz at this point
+	 * so a short loop should be enough.
+	 * this delay is required to ensure that
+	 * the RBC counter can start counting in
+	 * case an interrupt is already pending
+	 * or in case an interrupt arrives just
+	 * as ARM is about to assert DSM_request.
+	 */
+	ldr	r4, =50
+rbc_loop:
+	subs	r4, r4, #0x1
+	bne	rbc_loop
+
+	wfi
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	mov	r5, #0x0
+	sema4_lock
+	anatop_exit_idle
+	ccm_exit_idle
+	sema4_unlock
+	resume_mmdc
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
+	/* enable d-cache */
+	read_sctlr r7
+	orr	r7, r7, #(1 << 2)
+	write_sctlr r7
+
+#ifdef CFG_PL310
+	ldr	r10, [r0, #PM_INFO_PL310_V_OFF]
+	mov	r7, #0x1
+	/* enable L2 */
+	str	r7, [r10, #0x100]
+#endif
+
+	tlb_back_to_ddr
+
+	/* Restore registers */
+	pop	{r4 - r10}
+	bx	lr
+
+wakeup:
+	/* Invalidate Entire Instruction Cache */
+	write_icialluis
+	dsb
+	/* Flush entire branch target cache */
+	write_bpiallis
+	dsb
+	isb
+
+	/* switch monitor mode */
+	mov	r3, #0x16
+	mov	r4, #((1 << 6) | (1 << 7))
+	orr	r3, r3, r4
+	msr	cpsr, r3
+	nop
+	nop
+	nop
+	/* enable the Icache and branch prediction */
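+	/* SCTLR = 0x1800: I-cache (bit 12) and branch prediction (bit 11) only */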
+	mov	r1, #0x1800
+	write_sctlr r1
+	isb
+
+	/* restore diagnostic register */
+	ldr	r7, [r0, #PM_INFO_SAVED_DIAGNOSTIC_OFF]
+	mcr	p15, 0, r7, c15, c0, 1
+
+	/* get physical resume address from pm_info. */
+	ldr	lr, [r0, #PM_INFO_TEE_RESUME_OFF]
+	/* clear core0's entry and parameter */
+	ldr	r10, [r0, #PM_INFO_SRC_P_OFF]
+	mov	r7, #0x0
+	str	r7, [r10, #MX6SX_SRC_GPR1]
+	str	r7, [r10, #MX6SX_SRC_GPR2]
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_P_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
+	mov	r5, #0x1
+	sema4_lock
+	anatop_exit_idle
+	ccm_exit_idle
+	sema4_unlock
+	resume_mmdc
+
+	/* Restore registers */
+	bx	lr
+	.ltorg
+END_FUNC imx6sx_low_power_idle
+
+/**
+ * @brief   Calculates and returns the low power idle function size
+ *
+ * @retval  function size in bytes
+ */
+FUNC get_imx6sx_low_power_idle_size, :
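+	/*
+	 * In ARM state PC reads as the address of the current instruction
+	 * plus 8, so r0 gets the address of this function; subtracting
+	 * the entry of imx6sx_low_power_idle, which is placed just before
+	 * it, gives the size of the idle code (including its literal pool).
+	 */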
+	subs	r0, pc, #8
+	ldr		r1, =imx6sx_low_power_idle
+	sub		r0, r0, r1
+	bx		lr
+END_FUNC get_imx6sx_low_power_idle_size
diff --git a/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6ul.S b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6ul.S
new file mode 100644
index 000000000..e427a1676
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6ul.S
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <platform_config.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_def.h>
+
+	.section .text.psci.cpuidle
+
+	.macro	pll_do_wait_lock
+1:
+	ldr	r7, [r10, r8]
+	ands	r7, #0x80000000
+	beq	1b
+
+	.endm
+
+	.macro	ccm_do_wait
+2:
+	ldr	r7, [r10, #0x48]
+	cmp	r7, #0x0
+	bne	2b
+
+	.endm
+
+	.macro	ccm_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+
+	/* set ahb to 3MHz */
+	ldr	r7, [r10, #0x14]
+	orr	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	/* set perclk to 6MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	orr	r7, r7, #0x3
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc to 1MHz, periph2_clk2 needs to be @8MHz */
+	ldr	r7, [r10, #0x14]
+	orr     r7, r7, #0x2
+	orr	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/* bypass PLL1 output to OSC */
+	ldr	r7, [r10]
+	orr	r7, r7, #(0x1 << 16)
+	str	r7, [r10]
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+
+	/* set pll1_sw to pll1 main clk */
+	ldr	r7, [r10, #0xc]
+	bic	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	/* set step from osc */
+	ldr	r7, [r10, #0xc]
+	bic	r7, r7, #0x100
+	str	r7, [r10, #0xc]
+
+	/* set pll1_sw to step clk */
+	ldr	r7, [r10, #0xc]
+	orr	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/* Disable PLL1 bypass output */
+	ldr	r7, [r10]
+	bic	r7, r7, #0x12000
+	str	r7, [r10]
+
+	/*
+	 * Disable pll2: in low power idle mode only the 396MHz PFD
+	 * needs pll2, and the ARM clock now runs from the OSC, so
+	 * pll2 can be powered down. Gate pll2_pfd2 first.
+	 */
+	ldr	r7, [r10, #0x100]
+	orr	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	ldr	r7, [r10, #0x30]
+	orr	r7, r7, #0x1000
+	bic	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	.endm
+
+	.macro	ccm_exit_idle
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* enable pll2 and pll2_pfd2 */
+	ldr	r7, [r10, #0x30]
+	bic	r7, r7, #0x1000
+	orr	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	ldr	r8, =0x30
+	pll_do_wait_lock
+
+	ldr	r7, [r10, #0x100]
+	bic	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	/* enable PLL1 bypass output */
+	ldr	r7, [r10]
+	orr	r7, r7, #0x12000
+	str	r7, [r10]
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_CCM_P_OFF]
+
+	/* set perclk back to 24MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x7
+	bic	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	/* set ahb div back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	/* set pll1_sw to pll1 main clk */
+	ldr	r7, [r10, #0xc]
+	bic	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	/* set step from pll2_pfd2 */
+	ldr	r7, [r10, #0xc]
+	orr	r7, r7, #0x100
+	str	r7, [r10, #0xc]
+
+	/* set pll1_sw to step clk */
+	ldr	r7, [r10, #0xc]
+	orr	r7, r7, #0x4
+	str	r7, [r10, #0xc]
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* Unbypass PLL1 */
+	ldr	r7, [r10]
+	bic	r7, r7, #(0x1 << 16)
+	str	r7, [r10]
+
+	.endm
+
+	.macro	anatop_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/*
+	 * Check whether any PLL is enabled: the regular 2P5 and 1P1
+	 * regulators can only be turned off (keeping just the weak
+	 * ones) when no PLL is running.
+	 */
+
+	/* arm pll1 */
+	ldr	r7, [r10, #0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* sys pll2 */
+	ldr	r7, [r10, #0x30]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb pll3 */
+	ldr	r7, [r10, #0x10]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* audio pll4 */
+	ldr	r7, [r10, #0x70]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* video pll5 */
+	ldr	r7, [r10, #0xa0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enet pll6 */
+	ldr	r7, [r10, #0xe0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb host pll7 */
+	ldr	r7, [r10, #0x20]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enable weak 2P5 and turn off regular 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* enable weak 1p1 and turn off regular 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+
+	/* check whether ARM LDO is bypassed */
+	ldr	r7, [r10, #0x140]
+	and	r7, r7, #0x1f
+	cmp	r7, #0x1f
+	bne	10f
+
+	/* low power band gap enable */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+
+	/* turn off the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/*
+	 * clear the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 * Per RM, should be cleared when
+	 * band gap is powered down.
+	 */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x8
+	str	r7, [r10, #0x150]
+
+	/* turn off regular bandgap */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+
+	/* switch to RC-OSC */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x40000000
+	str	r7, [r10, #0x150]
+10:
+	/* lower OSC current by 37.5% */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	/* disconnect vdd_high_in and vdd_snvs_in */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x1000
+	str	r7, [r10, #0x150]
+
+	.endm
+
+	.macro anatop_exit_idle
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* increase OSC current to normal */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	/* turn on XTAL-OSC and detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x40000000
+	orr	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+
+	/* wait for XTAL stable */
+14:
+	ldr	r7, [r10, #0x150]
+	ands	r7, r7, #0x8000
+	beq	14b
+
+	/* switch to XTAL-OSC */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+15:
+	/* check whether we need to enable 2P5/1P1 */
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x40000
+	beq	11f
+
+	/* check whether ARM LDO is bypassed */
+	ldr	r7, [r10, #0x140]
+	and	r7, r7, #0x1f
+	cmp	r7, #0x1f
+	bne	12f
+
+	/* turn on regular bandgap and wait for stable */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+13:
+	ldr	r7, [r10, #0x150]
+	ands	r7, #0x80
+	beq	13b
+
+	/*
+	 * set the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 */
+	ldr     r7, [r10, #0x150]
+	orr     r7, r7, #0x8
+	str     r7, [r10, #0x150]
+
+	/* turn on the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/* low power band gap disable */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+12:
+	/* enable regular 2P5 and turn off weak 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* Ensure the 2P5 is up. */
+3:
+	ldr	r7, [r10, #0x130]
+	ands	r7, r7, #0x20000
+	beq	3b
+	ldr	r7, [r10, #0x130]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+
+	/* enable regular 1p1 and turn off weak 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+4:
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x20000
+	beq	4b
+	ldr	r7, [r10, #0x110]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+11:
+	.endm
+
+	.macro	disable_l1_dcache
+
+	/*
+	 * Flush all data from the L1 data cache before disabling
+	 * SCTLR.C bit.
+	 */
+	push    {r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	mov	lr, pc
+	bx	r1
+	pop     {r0 - r10, lr}
+
+	/* disable d-cache */
+	read_sctlr r7
+	bic	r7, r7, #0x4
+	write_sctlr r7
+	dsb
+	isb
+
+	push    {r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	mov	lr, pc
+	bx	r1
+	pop     {r0 - r10, lr}
+
+	.endm
+
+	.macro mmdc_enter_dvfs_mode
+
+	/* disable automatic power savings. */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+
+	/* disable power down timer */
+	ldr	r7, [r10, #0x4]
+	bic	r7, r7, #0xff00
+	str	r7, [r10, #0x4]
+
+	/* make the DDR explicitly enter self-refresh. */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+5:
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	beq	5b
+
+	.endm
+
+	.macro	resume_mmdc
+
+	/* restore MMDC IO */
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_IOMUXC_P_OFF]
+
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
+6:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	add	r7, r7, #0x4
+	str	r9, [r10, r8]
+	subs	r6, r6, #0x1
+	bne	6b
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_MMDC0_P_OFF]
+
+	/* reset read FIFO, RST_RD_FIFO */
+	ldr	r7, =MX6Q_MMDC_MPDGCTRL0
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+7:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	7b
+
+	/* reset FIFO a second time */
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+8:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	8b
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+9:
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	bne	9b
+
+	/* enable power down timer */
+	ldr	r7, [r10, #0x4]
+	orr	r7, r7, #0x5500
+	str	r7, [r10, #0x4]
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+
+	.endm
+
+	.macro	tlb_set_to_ocram
+
+	/* save ttbr */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_TTBR1_OFF]
+
+	read_ttbr0 r7
+	str	r7, [r0, #PM_INFO_TTBR0_OFF]
+
+	/*
+	 * To ensure no page table walks occur in DDR, we
+	 * have another page table stored in IRAM that only
+	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
+	 * We need to set the TTBR1 to the new IRAM TLB.
+	 * Do the following steps:
+	 * 1. Flush the Branch Target Address Cache (BTAC)
+	 * 2. Set TTBR1 to point to IRAM page table.
+	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
+	 * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0
+	 *     and 2-4G is translated by TTBR1.
+	 */
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r7, [r6]
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #0x800
+	write_sctlr r6
+
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1/TTBR0 */
+	write_ttbr1 r7
+	write_ttbr0 r7
+
+	/* Read TTBCR and set PD0=1 */
+	read_ttbcr r6
+	orr	r6, r6, #0x10
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+	.endm
+
+	.macro	tlb_back_to_ddr
+
+	/* Restore the TTBCR */
+
+	dsb
+	isb
+
+	/* Read TTBCR and clear PD0 */
+	read_ttbcr r6
+	bic	r6, r6, #0x10
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	dsb
+	isb
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* restore ttbr */
+	ldr	r6, [r0, #PM_INFO_TTBR1_OFF]
+	write_ttbr1 r6
+	ldr	r6, [r0, #PM_INFO_TTBR0_OFF]
+	write_ttbr0 r6
+	isb
+
+	.endm
+
+/* imx6ul_low_power_idle */
+
+	.align 3
+
+FUNC imx6ul_low_power_idle, :
+	push	{r4 - r10}
+
+	ldr	r1, [r0, #PM_INFO_PBASE_OFF]
+	ldr	r2, [r0, #PM_INFO_INFO_SIZE_OFF]
+
+	/*
+	 * R3 holds the resume address that is set in SRC GPR1
+	 */
+	ldr	r5, =imx6ul_low_power_idle
+	ldr	r6, =wakeup
+	sub	r6, r6, r5
+	add	r8, r1, r2
+	add	r3, r8, r6
+
+	ldr	r10, [r0, #PM_INFO_SRC_V_OFF]
+	str	r3, [r10, #0x20]
+	str	r1, [r10, #0x24]
+
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x1
+	str	r7, [r10, #0x2a0]
+
+	disable_l1_dcache
+
+	tlb_set_to_ocram
+
+	ldr	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	mmdc_enter_dvfs_mode
+
+	ldr	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldr	r6, =0x0
+	ldr	r7, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r8, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r8, r8, r0
+
+save_and_set_mmdc_io_lpm:
+	ldr	r9, [r8], #0x4
+	ldr	r5, [r10, r9]
+	str	r6, [r10, r9]
+	str	r5, [r8], #0x4
+	/* We reuse the structure for suspend/resume,
+	 * the step is 12 bytes.
+	 */
+	add	r8, r8, #0x4
+	subs	r7, r7, #0x1
+	bne	save_and_set_mmdc_io_lpm
+
+	mov	r5, #0x0
+	ccm_enter_idle
+	anatop_enter_idle
+
+	/*
+	 * Mask all GPC interrupts before enabling the RBC counters
+	 * to avoid the counter starting too early if an interrupt is
+	 * already pending.
+	 */
+	 ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	 ldr	r4, [r10, #MX6Q_GPC_IMR1]
+	 ldr	r5, [r10, #MX6Q_GPC_IMR2]
+	 ldr	r6, [r10, #MX6Q_GPC_IMR3]
+	 ldr	r7, [r10, #MX6Q_GPC_IMR4]
+
+	 ldr	r3, =0xffffffff
+	 str	r3, [r10, #MX6Q_GPC_IMR1]
+	 str	r3, [r10, #MX6Q_GPC_IMR2]
+	 str	r3, [r10, #MX6Q_GPC_IMR3]
+	 str	r3, [r10, #MX6Q_GPC_IMR4]
+
+	 /*
+	  * Enable the RBC bypass counter here
+	  * to hold off the interrupts. RBC counter
+	  * = 4 (120us). With this setting, the latency
+	  * from wakeup interrupt to ARM power up
+	  * is ~130us
+	  */
+
+	  ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+	  ldr	r3, [r10, #MX6Q_CCM_CCR]
+	  bic	r3, r3, #(0x3f << 21)
+	  orr	r3, r3, #(0x20 << 21)
+	  str	r3, [r10, #MX6Q_CCM_CCR]
+
+	  ldr	r3, [r10, #MX6Q_CCM_CCR]
+	  orr	r3, r3, #(0x1 << 27)
+	  str	r3, [r10, #MX6Q_CCM_CCR]
+
+	  /* unmask all the GPC interrupts. */
+	  ldr     r10, [r0, #PM_INFO_GPC_V_OFF]
+	  str	r4, [r10, #MX6Q_GPC_IMR1]
+	  str	r5, [r10, #MX6Q_GPC_IMR2]
+	  str	r6, [r10, #MX6Q_GPC_IMR3]
+	  str	r7, [r10, #MX6Q_GPC_IMR4]
+
+	/*
+	 * now delay for a short while (3usec)
+	 * ARM is at 24MHz at this point
+	 * so a short loop should be enough.
+	 * this delay is required to ensure that
+	 * the RBC counter can start counting in
+	 * case an interrupt is already pending
+	 * or in case an interrupt arrives just
+	 * as ARM is about to assert DSM_request.
+	 */
+	ldr	r4, =50
+rbc_loop:
+	subs	r4, r4, #0x1
+	bne	rbc_loop
+
+	wfi
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	mov	r5, #0x0
+	anatop_exit_idle
+	ccm_exit_idle
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
+	resume_mmdc
+	/* enable d-cache */
+	mrc	p15, 0, r7, c1, c0, 0
+	orr	r7, r7, #(1 << 2)
+	mcr	p15, 0, r7, c1, c0, 0
+
+	tlb_back_to_ddr
+
+	/* Restore registers */
+	pop	{r4 - r10}
+	bx	lr
+
+wakeup:
+	/* Do not use arm_cl1_i_inv_all, we are running from OCRAM now */
+	/* Invalidate Entire Instruction Cache */
+	write_icialluis
+	dsb
+	/* Flush entire branch target cache */
+	write_bpiallis
+	dsb
+	isb
+
+	/* switch monitor mode */
+	mov	r3, #0x16
+	mov	r4, #((1 << 6) | (1 << 7))
+	orr	r3, r3, r4
+	msr	cpsr, r3
+	nop
+	nop
+	nop
+
+	/* enable the Icache and branch prediction */
+	mov	r1, #0x1800
+	write_sctlr r1
+	isb
+
+	/* get physical resume address from pm_info. */
+	ldr	lr, [r0, #PM_INFO_TEE_RESUME_OFF]
+	/* clear core0's entry and parameter */
+	ldr	r10, [r0, #PM_INFO_SRC_P_OFF]
+	mov	r7, #0x0
+	str	r7, [r10, #MX6Q_SRC_GPR1]
+	str	r7, [r10, #MX6Q_SRC_GPR2]
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_P_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
+	mov	r5, #0x1
+	anatop_exit_idle
+	ccm_exit_idle
+	resume_mmdc
+
+	/* Restore registers, LR may point to Thumb instructions */
+	bx	lr
+	.ltorg
+mx6ul_lpm_wfi_end:
+END_FUNC imx6ul_low_power_idle
diff --git a/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6ull.S b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6ull.S
new file mode 100644
index 000000000..e62cd72f5
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx6ull.S
@@ -0,0 +1,757 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <platform_config.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_def.h>
+
+	.section .text.psci.cpuidle
+
+	.macro	pll_do_wait_lock
+1:
+	ldr	r7, [r10, r8]
+	ands	r7, #0x80000000
+	beq	1b
+
+	.endm
+
+	.macro	ccm_do_wait
+2:
+	ldr	r7, [r10, #0x48]
+	cmp	r7, #0x0
+	bne	2b
+
+	.endm
+
+	.macro	ccm_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_CCM_V_OFF]
+
+	/* set ahb to 3MHz */
+	ldr	r7, [r10, #0x14]
+	orr	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	/* set perclk to 6MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	orr	r7, r7, #0x3
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc to 1MHz, periph2_clk2 needs to be @8MHz */
+	ldr	r7, [r10, #0x14]
+	orr     r7, r7, #0x2
+	orr	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/*
+	 * Disable pll2: in low power idle mode only the 396MHz PFD
+	 * needs pll2, and the ARM clock now runs from the OSC, so
+	 * pll2 can be powered down. Gate pll2_pfd2 first.
+	 */
+	ldr	r7, [r10, #0x100]
+	orr	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	ldr	r7, [r10, #0x30]
+	orr	r7, r7, #0x1000
+	bic	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	.endm
+
+	.macro	ccm_exit_idle
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* enable pll2 and pll2_pfd2 */
+	ldr	r7, [r10, #0x30]
+	bic	r7, r7, #0x1000
+	orr	r7, r7, #0x2000
+	str	r7, [r10, #0x30]
+
+	ldr	r8, =0x30
+	pll_do_wait_lock
+
+	ldr	r7, [r10, #0x100]
+	bic	r7, #0x800000
+	str	r7, [r10, #0x100]
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_CCM_P_OFF]
+
+	/* set perclk back to 24MHz */
+	ldr	r7, [r10, #0x1c]
+	bic	r7, r7, #0x3f
+	str	r7, [r10, #0x1c]
+
+	/* set mmdc back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x7
+	bic	r7, r7, #(0x7 << 3)
+	str	r7, [r10, #0x14]
+
+	/* set ahb div back to 24MHz */
+	ldr	r7, [r10, #0x14]
+	bic	r7, r7, #0x1c00
+	str	r7, [r10, #0x14]
+
+	ccm_do_wait
+
+	.endm
+
+	.macro	anatop_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+
+	/*
+	 * Check whether any PLL is enabled: the regular 2P5 and 1P1
+	 * regulators can only be turned off (keeping just the weak
+	 * ones) when no PLL is running.
+	 */
+
+	/* arm pll1 */
+	ldr	r7, [r10, #0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* sys pll2 */
+	ldr	r7, [r10, #0x30]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb pll3 */
+	ldr	r7, [r10, #0x10]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* audio pll4 */
+	ldr	r7, [r10, #0x70]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* video pll5 */
+	ldr	r7, [r10, #0xa0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enet pll6 */
+	ldr	r7, [r10, #0xe0]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* usb host pll7 */
+	ldr	r7, [r10, #0x20]
+	ands	r7, r7, #(1 << 31)
+	bne	10f
+
+	/* enable weak 2P5 and turn off regular 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* enable weak 1p1 and turn off regular 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+
+	/* check whether ARM LDO is bypassed */
+	ldr	r7, [r10, #0x140]
+	and	r7, r7, #0x1f
+	cmp	r7, #0x1f
+	bne	10f
+
+	/* low power band gap enable */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+
+	/* turn off the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/*
+	 * clear the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 * Per RM, should be cleared when
+	 * band gap is powered down.
+	 */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x8
+	str	r7, [r10, #0x150]
+
+	/* turn off regular bandgap */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+
+10:
+	/* switch to RC-OSC */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x40000000
+	str	r7, [r10, #0x150]
+
+	/* lower OSC current by 37.5% */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	/* disconnect vdd_high_in and vdd_snvs_in */
+	ldr	r7, [r10, #0x150]
+	orr	r7, r7, #0x1000
+	str	r7, [r10, #0x150]
+
+	.endm
+
+	.macro anatop_exit_idle
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_ANATOP_P_OFF]
+
+	/* increase OSC current to normal */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x6000
+	str	r7, [r10, #0x150]
+
+	/* turn on XTAL-OSC and detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x40000000
+	orr	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+
+	/* wait for XTAL stable */
+14:
+	ldr	r7, [r10, #0x150]
+	ands	r7, r7, #0x8000
+	beq	14b
+
+	/* switch to XTAL-OSC */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x10
+	str	r7, [r10, #0x270]
+
+	/* turn off XTAL-OSC detector */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x10000
+	str	r7, [r10, #0x150]
+15:
+	/* check whether we need to enable 2P5/1P1 */
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x40000
+	beq	11f
+
+	/* check whether ARM LDO is bypassed */
+	ldr	r7, [r10, #0x140]
+	and	r7, r7, #0x1f
+	cmp	r7, #0x1f
+	bne	12f
+
+	/* turn on regular bandgap and wait for stable */
+	ldr	r7, [r10, #0x150]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x150]
+13:
+	ldr	r7, [r10, #0x150]
+	ands	r7, #0x80
+	beq	13b
+
+	/*
+	 * set the REFTOP_SELFBIASOFF,
+	 * self-bias circuit of the band gap.
+	 */
+	ldr     r7, [r10, #0x150]
+	orr     r7, r7, #0x8
+	str     r7, [r10, #0x150]
+
+	/* turn on the bias current from the regular bandgap */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x80
+	str	r7, [r10, #0x270]
+
+	/* low power band gap disable */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x20
+	str	r7, [r10, #0x270]
+12:
+	/* enable regular 2P5 and turn off weak 2P5 */
+	ldr	r7, [r10, #0x130]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x130]
+
+	/* Ensure the 2P5 is up. */
+3:
+	ldr	r7, [r10, #0x130]
+	ands	r7, r7, #0x20000
+	beq	3b
+	ldr	r7, [r10, #0x130]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x130]
+
+	/* enable regular 1p1 and turn off weak 1P1 */
+	ldr	r7, [r10, #0x110]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x110]
+4:
+	ldr	r7, [r10, #0x110]
+	ands	r7, r7, #0x20000
+	beq	4b
+	ldr	r7, [r10, #0x110]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x110]
+11:
+	.endm
+
+	.macro	disable_l1_dcache
+
+	/*
+	 * Flush all data from the L1 data cache before disabling
+	 * SCTLR.C bit.
+	 */
+	push	{r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+	/* disable d-cache */
+	read_sctlr r7
+	bic	r7, r7, #0x4
+	write_sctlr r7
+	dsb
+	isb
+
+	push	{r0 - r10, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+	.endm
+
+	.macro mmdc_enter_dvfs_mode
+
+	/* disable automatic power savings. */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+
+	/* disable power down timer */
+	ldr	r7, [r10, #0x4]
+	bic	r7, r7, #0xff00
+	str	r7, [r10, #0x4]
+
+	/* make the DDR explicitly enter self-refresh. */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+5:
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	beq	5b
+
+	.endm
+
+	.macro	resume_mmdc
+
+	/* restore MMDC IO */
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_IOMUXC_P_OFF]
+
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
+6:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	add	r7, r7, #0x4
+	str	r9, [r10, r8]
+	subs	r6, r6, #0x1
+	bne	6b
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_MMDC0_P_OFF]
+
+	/* reset read FIFO, RST_RD_FIFO */
+	ldr	r7, =MX6Q_MMDC_MPDGCTRL0
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+7:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	7b
+
+	/* reset FIFO a second time */
+	ldr	r6, [r10, r7]
+	orr	r6, r6, #(1 << 31)
+	str	r6, [r10, r7]
+8:
+	ldr	r6, [r10, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	8b
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 21)
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+9:
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	bne	9b
+
+	/* enable power down timer */
+	ldr	r7, [r10, #0x4]
+	orr	r7, r7, #0x5500
+	str	r7, [r10, #0x4]
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r10, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #MX6Q_MMDC_MAPSR]
+
+	.endm
+
+	.macro	tlb_set_to_ocram
+
+	/* save ttbr */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_TTBR1_OFF]
+
+	read_ttbr0 r7
+	str	r7, [r0, #PM_INFO_TTBR0_OFF]
+
+	/*
+	 * To ensure no page table walks occur in DDR, we
+	 * have another page table stored in IRAM that only
+	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
+	 * We need to set the TTBR1 to the new IRAM TLB.
+	 * Do the following steps:
+	 * 1. Flush the Branch Target Address Cache (BTAC)
+	 * 2. Set TTBR1 to point to IRAM page table.
+	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
+	 * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0
+	 *     and 2-4G is translated by TTBR1.
+	 */
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r7, [r6]
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #0x800
+	write_sctlr r6
+
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1/0 */
+	write_ttbr1 r7
+	write_ttbr0 r7
+
+	/* Read TTBCR and set PD0=1 */
+	read_ttbcr r6
+	orr	r6, r6, #0x10
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+	.endm
+
+	.macro	tlb_back_to_ddr
+
+	/* Restore the TTBCR */
+
+	dsb
+	isb
+
+	/* Read TTBCR and clear PD0 */
+	read_ttbcr r6
+	bic	r6, r6, #0x10
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	dsb
+	isb
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* restore ttbr */
+	ldr	r6, [r0, #PM_INFO_TTBR1_OFF]
+	write_ttbr1 r6
+	ldr	r6, [r0, #PM_INFO_TTBR0_OFF]
+	write_ttbr0 r6
+	isb
+
+	.endm
+
+/* imx6ull_low_power_idle */
+
+	.align 3
+FUNC imx6ull_low_power_idle, :
+	push	{r4 - r10}
+
+	/* get necessary info from pm_info */
+	ldr	r1, [r0, #PM_INFO_PBASE_OFF]
+	ldr	r2, [r0, #PM_INFO_INFO_SIZE_OFF]
+
+	/*
+	 * Compute the resume address in OCRAM so it can be
+	 * programmed into the SRC register.
+	 */
+	ldr	r5, =imx6ull_low_power_idle
+	ldr     r6, =wakeup
+	sub	r6, r6, r5
+	add     r8, r1, r2
+	add	r3, r8, r6
+
+	/* store physical resume addr and pm_info address. */
+	ldr	r10, [r0, #PM_INFO_SRC_V_OFF]
+	str	r3, [r10, #0x20]
+	str	r1, [r10, #0x24]
+
+	/* set ARM power to be gated */
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x1
+	str	r7, [r10, #0x2a0]
+
+	disable_l1_dcache
+
+	tlb_set_to_ocram
+
+	/* make sure MMDC in self-refresh */
+	ldr	r10, [r0, #PM_INFO_MMDC0_V_OFF]
+	mmdc_enter_dvfs_mode
+
+	/* save DDR IO settings */
+	ldr     r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldr     r6, =0x0
+	ldr     r7, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr     r8, =PM_INFO_MMDC_IO_VAL_OFF
+	add     r8, r8, r0
+save_and_set_mmdc_io_lpm:
+	ldr	r9, [r8], #0x4
+	ldr	r5, [r10, r9]
+	str	r6, [r10, r9]
+	str	r5, [r8], #0x4
+	/* We reuse the structure for suspend/resume,
+	 * the step is 12 bytes.
+	 */
+	add	r8, r8, #0x4
+	subs	r7, r7, #0x1
+	bne	save_and_set_mmdc_io_lpm
+
+	mov	r5, #0x0
+	ccm_enter_idle
+	anatop_enter_idle
+
+	/*
+	 * mask all GPC interrupts before
+	 * enabling the RBC counters to
+	 * avoid the counter starting too
+	 * early if an interrupt is already
+	 * pending.
+	 */
+	ldr     r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r4, [r10, #MX6Q_GPC_IMR1]
+	ldr	r5, [r10, #MX6Q_GPC_IMR2]
+	ldr	r6, [r10, #MX6Q_GPC_IMR3]
+	ldr	r7, [r10, #MX6Q_GPC_IMR4]
+
+	ldr	r3, =0xffffffff
+	str	r3, [r10, #MX6Q_GPC_IMR1]
+	str	r3, [r10, #MX6Q_GPC_IMR2]
+	str	r3, [r10, #MX6Q_GPC_IMR3]
+	str	r3, [r10, #MX6Q_GPC_IMR4]
+
+	/*
+	 * enable the RBC bypass counter here
+	 * to hold off the interrupts. RBC counter
+	 * = 4 (120us). With this setting, the latency
+	 * from wakeup interrupt to ARM power up
+	 * is ~130us.
+	 */
+	ldr     r10, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r3, [r10, #MX6Q_CCM_CCR]
+	bic	r3, r3, #(0x3f << 21)
+	orr	r3, r3, #(0x4 << 21)
+	str	r3, [r10, #MX6Q_CCM_CCR]
+
+	/* enable the counter. */
+	ldr	r3, [r10, #MX6Q_CCM_CCR]
+	orr	r3, r3, #(0x1 << 27)
+	str	r3, [r10, #MX6Q_CCM_CCR]
+
+	/* unmask all the GPC interrupts. */
+	ldr     r10, [r0, #PM_INFO_GPC_V_OFF]
+	str	r4, [r10, #MX6Q_GPC_IMR1]
+	str	r5, [r10, #MX6Q_GPC_IMR2]
+	str	r6, [r10, #MX6Q_GPC_IMR3]
+	str	r7, [r10, #MX6Q_GPC_IMR4]
+
+	/*
+	 * now delay for a short while (3usec)
+	 * ARM is at 24MHz at this point
+	 * so a short loop should be enough.
+	 * this delay is required to ensure that
+	 * the RBC counter can start counting in
+	 * case an interrupt is already pending
+	 * or in case an interrupt arrives just
+	 * as ARM is about to assert DSM_request.
+	 */
+	ldr	r4, =50
+rbc_loop:
+	subs	r4, r4, #0x1
+	bne	rbc_loop
+
+	wfi
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	mov	r5, #0x0
+	anatop_exit_idle
+	ccm_exit_idle
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
+	resume_mmdc
+	/* enable d-cache */
+	mrc	p15, 0, r7, c1, c0, 0
+	orr	r7, r7, #(1 << 2)
+	mcr	p15, 0, r7, c1, c0, 0
+
+	tlb_back_to_ddr
+
+	/* Restore registers */
+	pop	{r4 - r10}
+	bx	lr
+
+wakeup:
+	/* invalidate L1 I-cache first */
+	mov	r1, #0x0
+	mcr	p15, 0, r1, c7, c5, 0
+	mcr	p15, 0, r1, c7, c5, 0
+	mcr	p15, 0, r1, c7, c5, 6
+	/* enable the Icache and branch prediction */
+	mov	r1, #0x1800
+	mcr	p15, 0, r1, c1, c0, 0
+	isb
+
+	/* switch monitor mode */
+	mov	r3, #0x16
+	mov	r4, #((1 << 6) | (1 << 7))
+	orr	r3, r3, r4
+	msr	cpsr, r3
+	nop
+	nop
+	nop
+
+	/* get physical resume address from pm_info. */
+	ldr	lr, [r0, #PM_INFO_TEE_RESUME_OFF]
+	/* clear core0's entry and parameter */
+	ldr	r10, [r0, #PM_INFO_SRC_P_OFF]
+	mov	r7, #0x0
+	str	r7, [r10, #MX6Q_SRC_GPR1]
+	str	r7, [r10, #MX6Q_SRC_GPR2]
+
+	/* clear ARM power gate setting */
+	ldr	r10, [r0, #PM_INFO_GPC_P_OFF]
+	ldr	r7, =0x0
+	str	r7, [r10, #0x2a0]
+
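+	/*
+	 * r5 = 1: the MMU is off on the wakeup path, so the exit macros
+	 * must use the physical register bases from pm_info.
+	 */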
+	mov	r5, #0x1
+	anatop_exit_idle
+	ccm_exit_idle
+	resume_mmdc
+
+	/* Restore registers */
+	bx	lr
+	.ltorg
+mx6ull_lpm_wfi_end:
+END_FUNC imx6ull_low_power_idle
diff --git a/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx7.S b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx7.S
new file mode 100644
index 000000000..e092ce034
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/psci-cpuidle-imx7.S
@@ -0,0 +1,750 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2018-2020, 2023 NXP
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <platform_config.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/tz_proc_def.h>
+
+#define MX7_SRC_GPR1	0x74
+#define MX7_SRC_GPR2	0x78
+#define MX7_SRC_GPR3	0x7c
+#define MX7_SRC_GPR4	0x80
+#define MX7_GPC_IMR1	0x30
+#define MX7_GPC_IMR2	0x34
+#define MX7_GPC_IMR3	0x38
+#define MX7_GPC_IMR4	0x3c
+#define DDRC_STAT	0x4
+#define DDRC_PWRCTL	0x30
+#define DDRC_DBG1	0x304
+#define DDRC_DBGCAM	0x308
+#define DDRC_PSTAT	0x3fc
+#define DDRC_PCTRL_0	0x490
+
+	.section .text.psci.cpuidle
+	.align 3
+/*
+ * imx_pen_lock
+ *
+ * Reference for Peterson's algorithm:
+ * http://en.wikipedia.org/wiki/Peterson's_algorithm
+ *
+ * r6 = &val, the turn variable (inverted from Peterson's algorithm:
+ *      each CPU stores its own id and spins while it is still stored)
+ * on cpu 0:
+ *   r7 = &flag[0] (own flag), r8 = &flag[1] (other CPU's flag)
+ * on cpu 1:
+ *   r7 = &flag[1] (own flag), r8 = &flag[0] (other CPU's flag)
+ *
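+ * Plain loads and stores are used instead of LDREX/STREX because one of
+ * the CPUs may already have its data cache and SMP bit disabled at this
+ * point, where exclusive accesses cannot be relied upon.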
+ */
+	.macro	imx_pen_lock
+
+	mov	r8, r0
+	read_mpidr r5
+	and	r5, r5, #3
+	add	r6, r8, #PM_INFO_MX7_VAL_OFF
+	cmp	r5, #0
+	addeq	r7, r8, #PM_INFO_MX7_FLAG0_OFF
+	addeq	r8, r8, #PM_INFO_MX7_FLAG1_OFF
+	addne	r7, r8, #PM_INFO_MX7_FLAG1_OFF
+	addne	r8, r8, #PM_INFO_MX7_FLAG0_OFF
+
+	mov	r9, #1
+	str	r9, [r7]
+	dsb
+	str	r5, [r6]
+1:
+	dsb
+	ldr	r9, [r8]
+	cmp	r9, #1
+	ldreq	r9, [r6]
+	cmpeq	r9, r5
+	beq	1b
+
+	.endm
+
+	.macro	imx_pen_unlock
+
+	dsb
+	read_mpidr r6
+	and	r6, r6, #3
+	cmp	r6, #0
+	addeq	r7, r0, #PM_INFO_MX7_FLAG0_OFF
+	addne	r7, r0, #PM_INFO_MX7_FLAG1_OFF
+	mov	r9, #0
+	str	r9, [r7]
+
+	.endm
+
+	.macro  disable_l1_dcache
+
+	push	{r0 - r12, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	blx	r1
+	pop	{r0 - r12, lr}
+
+	/* disable d-cache */
+	read_sctlr r7
+	bic	r7, r7, #SCTLR_C
+	write_sctlr r7
+	dsb
+	isb
+
+	push	{r0 - r12, lr}
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	ldr	r1, =dcache_op_all
+	blx	r1
+	pop	{r0 - r12, lr}
+
+	/* TODO: handle non-SMP kernel */
+	clrex
+
+	/* Turn off SMP bit. */
+	read_actlr r8
+	bic	r8, r8, #ACTLR_SMP
+	write_actlr r8
+	isb
+	dsb
+
+	.endm
+
+	.macro	tlb_set_to_ocram
+
+	/* save ttbr */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_MX7_TTBR1_OFF]
+
+	read_ttbr0 r7
+	str	r7, [r0, #PM_INFO_MX7_TTBR0_OFF]
+
+	/*
+	 * To ensure no page table walks occur in DDR, we
+	 * have another page table stored in IRAM that only
+	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
+	 * We need to set the TTBR1 to the new IRAM TLB.
+	 * Do the following steps:
+	 * 1. Flush the Branch Target Address Cache (BTAC)
+	 * 2. Set TTBR1 to point to IRAM page table.
+	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
+	 */
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #SCTLR_Z
+	write_sctlr r6
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r7, [r6]
+
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1/TTBR0 */
+	write_ttbr1 r7
+	write_ttbr0 r7
+
+	/* Read TTBCR and set PD0=1 */
+	read_ttbcr r6
+	orr	r6, r6, #TTBCR_PD0
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+
+	.endm
+
+	.macro	tlb_back_to_ddr
+
+	/* Read TTBCR and set PD0=0 */
+	read_ttbcr r6
+	bic	r6, r6, #TTBCR_PD0
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #SCTLR_Z
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* restore ttbr */
+	ldr	r7, [r0, #PM_INFO_MX7_TTBR1_OFF]
+	write_ttbr1 r7
+	ldr	r7, [r0, #PM_INFO_MX7_TTBR0_OFF]
+	write_ttbr0 r7
+
+	.endm
+
+	/* r10 must be DDRC base address */
+	.macro ddrc_enter_self_refresh
+
+	ldr	r10, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+
+	/* disable port */
+	ldr	r7, =0x0
+	str	r7, [r10, #DDRC_PCTRL_0]
+
+	/* let DDR out of self-refresh */
+	ldr	r7, =0x0
+	str	r7, [r10, #DDRC_PWRCTL]
+
+	/* wait rw port_busy clear */
+	ldr	r6, =(0x1 << 16)
+	orr	r6, r6, #0x1
+2:
+	ldr	r7, [r10, #DDRC_PSTAT]
+	ands	r7, r7, r6
+	bne	2b
+
+	ldr	r7, =0x1
+	str	r7, [r10, #DDRC_DBG1]
+
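+	/* wait for the DDRC debug CAM and data pipelines to drain */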
+	ldr	r6, =0x36000000
+11:
+	ldr	r7, [r10, #DDRC_DBGCAM]
+	and	r7, r7, r6
+	cmp	r7, r6
+	bne	11b
+
+	/* enter self-refresh bit 5 */
+	ldr	r7, =(0x1 << 5)
+	str	r7, [r10, #DDRC_PWRCTL]
+
+	/* wait until self-refresh mode entered */
+3:
+	ldr	r7, [r10, #DDRC_STAT]
+	and	r7, r7, #0x3
+	cmp	r7, #0x3
+	bne	3b
+4:
+	ldr	r7, [r10, #DDRC_STAT]
+	ands	r7, r7, #0x20
+	beq	4b
+
+	/* disable dram clk */
+	ldr	r7, [r10, #DDRC_PWRCTL]
+	orr	r7, r7, #(1 << 3)
+	str	r7, [r10, #DDRC_PWRCTL]
+
+	/*
+	 * TO1.1 adds a DDR pads power down feature. TO1.0 does not
+	 * have it, but programming the GPR registers there is
+	 * harmless and avoids a silicon revision check in the idle
+	 * thread.
+	 */
+	ldr	r10, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]
+	ldr	r7, =0xf0000
+	str	r7, [r10]
+
+	/* delay 20us, measured by gpio */
+	ldr	r7, =20
+12:
+	subs	r7, r7, #0x1
+	bne	12b
+
+	.endm
+
+	/* r10 must be DDRC base address */
+	.macro ddrc_exit_self_refresh
+
+	cmp	r5, #0x1
+	ldreq	r10, [r0, #PM_INFO_MX7_IOMUXC_GPR_P_OFF]
+	ldrne	r10, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]
+
+	ldr	r7, =0x0
+	str	r7, [r10]
+
+	ldr	r7, =20
+13:
+	subs	r7, r7, #0x1
+	bne	13b
+
+	cmp	r5, #0x1
+	ldreq	r10, [r0, #PM_INFO_MX7_DDRC_P_OFF]
+	ldrne	r10, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+
+	ldr	r7, =0x0
+	str	r7, [r10, #DDRC_DBG1]
+
+	ldr	r6, =0x30000000
+14:
+	ldr	r7, [r10, #DDRC_DBGCAM]
+	and	r7, r7, r6
+	cmp	r7, r6
+	bne	14b
+
+	/* let DDR out of self-refresh */
+	ldr	r7, =0x0
+	str	r7, [r10, #DDRC_PWRCTL]
+
+	/* wait until self-refresh mode exited */
+5:
+	ldr	r7, [r10, #DDRC_STAT]
+	and	r7, r7, #0x3
+	cmp	r7, #0x3
+	beq	5b
+
+	/* enable auto self-refresh */
+	ldr	r7, [r10, #DDRC_PWRCTL]
+	orr	r7, r7, #(1 << 0)
+	str	r7, [r10, #DDRC_PWRCTL]
+
+	ldr	r7, =0x1
+	str	r7, [r10, #DDRC_PCTRL_0]
+
+	.endm
+
+	.macro	pll_do_wait_lock
+6:
+	ldr	r7, [r10, r8]
+	ands	r7, #0x80000000
+	beq	6b
+
+	.endm
+
+	.macro ccm_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+
+	/* ungate pfd1 332m for lower axi */
+	ldr	r7, =0x8000
+	str	r7, [r10, #0xc8]
+
+	ldr	r10, [r0, #PM_INFO_MX7_CCM_V_OFF]
+
+	/* switch ARM CLK to OSC */
+	ldr	r8, =0x8000
+	ldr	r7, [r10, r8]
+	bic	r7, r7, #0x7000000
+	str	r7, [r10, r8]
+
+	/* lower AXI clk from 24MHz to 3MHz */
+	ldr	r8, =0x8800
+	ldr	r7, [r10, r8]
+	orr	r7, r7, #0x7
+	str	r7, [r10, r8]
+
+	/* lower AHB clk from 24MHz to 3MHz */
+	ldr	r8, =0x9000
+	ldr	r7, [r10, r8]
+	orr	r7, r7, #0x7
+	str	r7, [r10, r8]
+
+	/* gate dram clk */
+	ldr	r8, =0x9880
+	ldr	r7, [r10, r8]
+	bic	r7, r7, #0x10000000
+	str	r7, [r10, r8]
+
+	ldr	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+
+	/* gate pfd1 332m */
+	ldr	r7, =0x8000
+	str	r7, [r10, #0xc4]
+
+	/* gate system pll pfd div 1 */
+	ldr	r7, =0x10
+	str	r7, [r10, #0xb4]
+	/* power down ARM, 480 and DRAM PLL */
+	ldr	r7, =0x1000
+	str	r7, [r10, #0x64]
+	str	r7, [r10, #0xb4]
+	ldr	r7, =0x100000
+	str	r7, [r10, #0x74]
+
+	.endm
+
+	.macro ccm_exit_idle
+
+	cmp	r5, #0x1
+	ldreq	r10, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
+	ldrne	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+
+	/* power up ARM, 480 and DRAM PLL */
+	ldr	r7, =0x1000
+	str	r7, [r10, #0x68]
+	ldr	r8, =0x60
+	pll_do_wait_lock
+
+	ldr	r7, =0x1000
+	str	r7, [r10, #0xb8]
+	ldr	r8, =0xb0
+	pll_do_wait_lock
+
+	ldr	r7, =0x100000
+	str	r7, [r10, #0x78]
+	ldr	r8, =0x70
+	pll_do_wait_lock
+
+	/* ungate pfd1 332m for lower axi */
+	ldr	r7, =0x8000
+	str	r7, [r10, #0xc8]
+
+	/* ungate system pll pfd div 1 */
+	ldr	r7, =0x10
+	str	r7, [r10, #0xb8]
+
+	cmp	r5, #0x1
+	ldreq	r10, [r0, #PM_INFO_MX7_CCM_P_OFF]
+	ldrne	r10, [r0, #PM_INFO_MX7_CCM_V_OFF]
+
+	/* switch ARM CLK to PLL */
+	ldr	r8, =0x8000
+	ldr	r7, [r10, r8]
+	orr	r7, r7, #0x1000000
+	str	r7, [r10, r8]
+
+	/* restore AXI clk from 3MHz to 24MHz */
+	ldr	r8, =0x8800
+	ldr	r7, [r10, r8]
+	bic	r7, r7, #0x7
+	str	r7, [r10, r8]
+
+	/* restore AHB clk from 3MHz to 24MHz */
+	ldr	r8, =0x9000
+	ldr	r7, [r10, r8]
+	bic	r7, r7, #0x7
+	str	r7, [r10, r8]
+
+	/* ungate dram clk */
+	ldr	r8, =0x9880
+	ldr	r7, [r10, r8]
+	orr	r7, r7, #0x10000000
+	str	r7, [r10, r8]
+
+	cmp	r5, #0x1
+	ldreq	r10, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
+	ldrne	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+
+	/* gate pfd1 332m for lower axi */
+	ldr	r7, =0x8000
+	str	r7, [r10, #0xc4]
+
+	.endm
+
+	.macro anatop_enter_idle
+
+	ldr	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+
+	/* XTAL to RC-OSC switch */
+	ldr	r7, [r10]
+	orr	r7, r7, #0x1000
+	str	r7, [r10]
+	/* power down XTAL */
+	ldr	r7, [r10]
+	orr	r7, r7, #0x1
+	str	r7, [r10]
+
+	/* enable weak 1P0A */
+	ldr	r7, [r10, #0x200]
+	orr	r7, r7, #0x40000
+	str	r7, [r10, #0x200]
+
+	/* disable LDO 1P0A */
+	ldr	r7, [r10, #0x200]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x200]
+
+	/* disable LDO 1P2 */
+	ldr	r7, [r10, #0x220]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x220]
+
+	/* switch to low power bandgap */
+	ldr	r7, [r10, #0x270]
+	orr	r7, r7, #0x400
+	str	r7, [r10, #0x270]
+	/* power down normal bandgap */
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x270]
+
+	.endm
+
+	.macro anatop_exit_idle
+
+	cmp	r5, #0x1
+	ldreq	r10, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
+	ldrne	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+
+	/* power on normal bandgap */
+	ldr	r7, [r10, #0x270]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #0x270]
+	/* switch to normal bandgap */
+	bic	r7, r7, #0x400
+	str	r7, [r10, #0x270]
+
+	/* enable LDO 1P2 */
+	ldr	r7, [r10, #0x220]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x220]
+7:
+	ldr	r7, [r10, #0x220]
+	ands	r7, #0x20000
+	beq	7b
+
+	/* enable LDO 1P0A */
+	ldr	r7, [r10, #0x200]
+	orr	r7, r7, #0x1
+	str	r7, [r10, #0x200]
+9:
+	ldr	r7, [r10, #0x200]
+	ands	r7, #0x20000
+	beq	9b
+	/* disable weak 1P0A */
+	ldr	r7, [r10, #0x200]
+	bic	r7, r7, #0x40000
+	str	r7, [r10, #0x200]
+
+	/* power up XTAL and wait */
+	ldr	r7, [r10]
+	bic	r7, r7, #0x1
+	str	r7, [r10]
+10:
+	ldr	r7, [r10]
+	ands	r7, r7, #0x4
+	beq	10b
+	/* RC-OSC to XTAL switch */
+	ldr	r7, [r10]
+	bic	r7, r7, #0x1000
+	str	r7, [r10]
+
+	.endm
+
+.extern iram_tlb_phys_addr
+
+FUNC imx7d_low_power_idle, :
+	push	{r0 - r12}
+
+	/* get necessary info from pm_info */
+	ldr	r1, [r0, #PM_INFO_MX7_PBASE_OFF]
+	ldr	r2, [r0, #PM_INFO_MX7_SIZE_OFF]
+
+	/*
+	 * Compute the physical resume address in IRAM so that it can
+	 * be programmed into the SRC register below.
+	 */
+	ldr	r5, =imx7d_low_power_idle
+	ldr	r6, =wakeup
+	sub	r6, r6, r5
+	add	r8, r1, r2
+	add	r3, r8, r6
+
+	/* r11 is cpu id */
+	read_mpidr r11
+	and	r11, r11, #3
+	cmp	r11, #0x0
+	ldreq	r6, =MX7_SRC_GPR1
+	ldreq	r7, =MX7_SRC_GPR2
+	ldrne	r6, =MX7_SRC_GPR3
+	ldrne	r7, =MX7_SRC_GPR4
+	/* store physical resume addr and pm_info address. */
+	ldr	r10, [r0, #PM_INFO_MX7_SRC_V_OFF]
+	str	r3, [r10, r6]
+	str	r1, [r10, r7]
+
+	disable_l1_dcache
+
+	tlb_set_to_ocram
+
+	/* check last to sleep */
+	ldr	r6, [r0, #PM_INFO_MX7_NUM_ONLINE_CPUS_OFF]
+	ldr	r7, [r0, #PM_INFO_MX7_NUM_LPI_CPUS_OFF]
+	cmp	r6, r7
+	bne	lpi_enter_done
+
+	ddrc_enter_self_refresh
+	ccm_enter_idle
+	anatop_enter_idle
+
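+	/* Disable the GIC distributor (GICD_CTLR at GIC base + 0x1000) */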
+	ldr	r10, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
+	ldr	r7, =0x0
+	ldr	r8, =0x1000
+	str	r7, [r10, r8]
+
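+	/* Save the GPC interrupt mask registers, then mask everything */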
+	ldr	r10, [r0, #PM_INFO_MX7_GPC_V_OFF]
+	ldr	r4, [r10, #MX7_GPC_IMR1]
+	ldr	r5, [r10, #MX7_GPC_IMR2]
+	ldr	r6, [r10, #MX7_GPC_IMR3]
+	ldr	r7, [r10, #MX7_GPC_IMR4]
+
+	ldr	r8, =0xffffffff
+	str	r8, [r10, #MX7_GPC_IMR1]
+	str	r8, [r10, #MX7_GPC_IMR2]
+	str	r8, [r10, #MX7_GPC_IMR3]
+	str	r8, [r10, #MX7_GPC_IMR4]
+
+	/*
+	 * Enable the RBC bypass counter here to hold off the
+	 * interrupts. RBC counter = 8 (240us). With this setting, the
+	 * latency from wakeup interrupt to ARM power up is ~250us.
+	 */
+	ldr	r8, [r10, #0x14]
+	bic	r8, r8, #(0x3f << 24)
+	orr	r8, r8, #(0x8 << 24)
+	str	r8, [r10, #0x14]
+
+	/* enable the counter. */
+	ldr	r8, [r10, #0x14]
+	orr	r8, r8, #(0x1 << 30)
+	str	r8, [r10, #0x14]
+
+	/* unmask all the GPC interrupts. */
+	str	r4, [r10, #MX7_GPC_IMR1]
+	str	r5, [r10, #MX7_GPC_IMR2]
+	str	r6, [r10, #MX7_GPC_IMR3]
+	str	r7, [r10, #MX7_GPC_IMR4]
+
+	/*
+	 * Now delay for a short while (30us). The ARM core runs at
+	 * 24MHz at this point, so a short loop is enough. This delay
+	 * is required to ensure that the RBC counter can start
+	 * counting in case an interrupt is already pending or arrives
+	 * just as the ARM core is about to assert DSM_request.
+	 */
+	ldr	r4, =5
+rbc_loop:
+	subs	r4, r4, #0x1
+	bne	rbc_loop
+
+lpi_enter_done:
+	imx_pen_unlock
+
+	wfi
+	isb
+
+	imx_pen_lock
+
+	/* check first to wake */
+	ldr	r6, [r0, #PM_INFO_MX7_NUM_ONLINE_CPUS_OFF]
+	ldr	r7, [r0, #PM_INFO_MX7_NUM_LPI_CPUS_OFF]
+	cmp	r6, r7
+	bne	skip_lpi_flow
+
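+	/* r5 = 0: the exit macros use virtual addresses (MMU still on) */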
+	ldr	r5, =0x0
+	anatop_exit_idle
+	ccm_exit_idle
+	ddrc_exit_self_refresh
+
+	ldr	r10, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
+	ldr	r7, =0x3
+	ldr	r8, =0x1000
+	str	r7, [r10, r8]
+skip_lpi_flow:
+
+	tlb_back_to_ddr
+
+	/* TODO: handle non-SMP kernel */
+	/* Turn on SMP bit. */
+	read_actlr r7
+	orr	r7, r7, #ACTLR_SMP
+	write_actlr r7
+
+	isb
+
+	/* enable d-cache */
+	read_sctlr r7
+	orr	r7, r7, #SCTLR_C
+	write_sctlr r7
+	dsb
+	isb
+
+	/* Restore registers */
+	pop	{r0 - r12}
+	bx	lr
+
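+/*
+ * Wakeup entry point: executed from OCRAM at the physical address stored
+ * in the SRC GPR above, with the MMU and caches disabled.
+ */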
+wakeup:
+	/* invalidate L1 I-cache first */
+	write_iciallu
+	write_bpiall
+	/* enable the Icache and branch prediction */
+	mov	r1, #(SCTLR_I | SCTLR_Z)
+	write_sctlr r1
+	isb
+
+	/* switch monitor mode */
+	cps	#CPSR_MODE_MON
+
+	imx_pen_lock
+	/* check first to wake */
+	ldr	r6, [r0, #PM_INFO_MX7_NUM_ONLINE_CPUS_OFF]
+	ldr	r7, [r0, #PM_INFO_MX7_NUM_LPI_CPUS_OFF]
+	cmp	r6, r7
+	bne	wakeup_skip_lpi_flow
+
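+	/* r5 = 1: the exit macros use physical addresses (MMU is off) */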
+	ldr	r5, =0x1
+	anatop_exit_idle
+	ccm_exit_idle
+	ddrc_exit_self_refresh
+wakeup_skip_lpi_flow:
+
+	/* get physical resume address from pm_info. */
+	ldr	lr, [r0, #PM_INFO_MX7_RESUME_ADDR_OFF]
+
+	/* Restore registers */
+	bx	lr
+END_FUNC imx7d_low_power_idle
+
+/*
+ * Note: OP-TEE VA == PA for TEE_RAM.
+ * This may change in the future.
+ */
+FUNC v7_cpu_resume, :
+	mov     r0, #0	@ ; write the cache size selection register to be
+	write_csselr r0	@ ; sure we address the data cache
+	isb		@ ; isb to sync the change to the cachesizeid reg
+
+_inv_dcache_off:
+	mov     r0, #0	@ ; set way number to 0
+_inv_nextway:
+	mov     r1, #0	@ ; set line number (=index) to 0
+_inv_nextline:
+	orr     r2, r0, r1	@ ; construct way/index value
+	write_dcisw r2 @ ; invalidate data or unified cache line by set/way
+	add     r1, r1, #1 << LINE_FIELD_OFFSET	@ ; increment the index
+	cmp     r1, #1 << LINE_FIELD_OVERFLOW	@ ; overflow out of set field
+	bne     _inv_nextline
+	add     r0, r0, #1 << WAY_FIELD_OFFSET	@ ; increment the way number
+	cmp     r0, #0				@ ; overflow out of way field
+	bne     _inv_nextway
+
+	dsb					@ ; synchronise
+
+	/*
+	 * No stack is available here; r0-r3 are scratch registers.
+	 * TODO: use a dedicated configuration instead of plat_xxx,
+	 * since plat_xxx may change in the future and cannot be relied
+	 * upon. The stack pointer must be handled carefully.
+	 */
+	blx plat_cpu_reset_early
+
+	b	sm_pm_cpu_resume
+END_FUNC v7_cpu_resume
diff --git a/core/drivers/pm/imx/cpuidle/sub.mk b/core/drivers/pm/imx/cpuidle/sub.mk
new file mode 100644
index 000000000..b8cf147f4
--- /dev/null
+++ b/core/drivers/pm/imx/cpuidle/sub.mk
@@ -0,0 +1,7 @@
+incdirs-y += ./..
+srcs-$(CFG_MX7) += cpuidle-imx7d.c psci-cpuidle-imx7.S
+srcs-$(CFG_MX6UL) += psci-cpuidle-imx6ul.S cpuidle-imx6ul.c
+srcs-$(CFG_MX6ULL) += psci-cpuidle-imx6ull.S cpuidle-imx6ul.c
+srcs-$(CFG_MX6SX) += psci-cpuidle-imx6sx.S cpuidle-imx6sx.c
+srcs-$(CFG_MX6SL) += psci-cpuidle-imx6sl.S cpuidle-imx6sl.c
+srcs-$(CFG_MX6SLL) += psci-cpuidle-imx6sll.S cpuidle-imx6sll.c
diff --git a/core/drivers/pm/imx/imx_ocram.c b/core/drivers/pm/imx/imx_ocram.c
new file mode 100644
index 000000000..488700f57
--- /dev/null
+++ b/core/drivers/pm/imx/imx_ocram.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2019 NXP
+ */
+
+#include <kernel/panic.h>
+#include <initcall.h>
+#include <trace.h>
+#include <mm/core_memprot.h>
+#include <string.h>
+#include <io.h>
+#include <imx.h>
+#include <imx_pm.h>
+#ifdef CFG_DT
+#include <libfdt.h>
+#include <kernel/boot.h>
+#include <kernel/dt.h>
+#endif /* CFG_DT */
+
+paddr_t iram_tlb_phys_addr = -1UL;
+
+#define DT_U32_PROP_SIZE	4
+#define DT_NUM_PROP_MAX		5
+
+#ifdef CFG_DT
+/*
+ * dt_overwrite() overwrites specified properties in the device tree.
+ * Properties to overwrite and new properties are also specified in
+ * the device tree.
+ *
+ * The format is the following:
+ *
+ * overw_str = <&node_1 prop_1 ... prop_n>,
+ *		<&node_2 prop_1 ... prop_n>,
+ *		...
+ *		<&node_n prop_1 ... prop_n>;
+ *
+ * prop_n variables are 32-bit integers.
+ *
+ * @node_str: Compatible string of the optee node.
+ * @prop_str: Property to overwrite (reg, clocks, ...).
+ * @overw_str: Array in node_str containing the new values for the property
+ *		specified by prop_str.
+ * @size_prop: Size of the property to overwrite.
+ */
+static void dt_overwrite(const char *node_str, const char *prop_str,
+				const char *overw_str, uint32_t size_prop)
+{
+	void *fdt, *prop;
+	uint32_t *fdt_overw_node;
+	int offset, phandle, ret, len;
+	uint32_t i, j, sub_node, sub[DT_NUM_PROP_MAX];
+
+	if (size_prop > DT_NUM_PROP_MAX) {
+		EMSG("Number of properties to overwrite is too high");
+		return;
+	}
+
+	if (node_str == NULL) {
+		EMSG("Compatible string empty");
+		return;
+	}
+
+	/* Get dtb */
+	fdt = get_dt();
+	if (fdt == NULL) {
+		EMSG("No DTB found");
+		return;
+	}
+
+	/* Get offset of node_str */
+	offset = fdt_node_offset_by_compatible(fdt, 0, node_str);
+	if (offset < 0) {
+		EMSG("Cannot find %s node in the device tree", node_str);
+		return;
+	}
+
+	/* Get overw_node property */
+	fdt_overw_node = (uint32_t *)fdt_getprop(fdt, offset, overw_str, &len);
+	if (!fdt_overw_node) {
+		DMSG("%s not found in %s", overw_str, node_str);
+		return;
+	}
+	sub_node = len / (size_prop * DT_U32_PROP_SIZE);
+
+	/* For each node to substitute */
+	for (i = 0; i < sub_node; i++) {
+
+		/* fdt_overw_node[0] has to be the phandle */
+		phandle = fdt32_to_cpu(fdt_overw_node[i*size_prop]);
+
+		/* Save new properties starting from fdt_overw_node[1] */
+		for (j = 0; j < size_prop; j++)
+			sub[j] = fdt_overw_node[i*size_prop+j+1];
+
+		/* Get offset based on the phandle */
+		offset = fdt_node_offset_by_phandle(fdt, phandle);
+		if (offset < 0) {
+			EMSG("Cannot find offset based on phandle");
+			return;
+		}
+
+		/* Get size of prop_str */
+		prop = fdt_getprop_w(fdt, offset, prop_str, &len);
+		if (!prop) {
+			DMSG("There is no property %s in the phandle %d",
+							prop_str, phandle);
+			return;
+		}
+
+		/* Replace prop_str property */
+		ret = fdt_setprop_inplace(fdt, offset, prop_str, sub, len);
+		if (ret) {
+			EMSG("Error setprop inplace ret=%d", ret);
+			return;
+		}
+	}
+}
+#endif /* CFG_DT */
+
+#if defined(CFG_MX7) || defined(CFG_MX6)
+
+#ifdef TRUSTZONE_OCRAM_START
+register_phys_mem(MEM_AREA_TEE_COHERENT,
+		  ROUNDDOWN(TRUSTZONE_OCRAM_START, CORE_MMU_PGDIR_SIZE),
+		  CORE_MMU_PGDIR_SIZE);
+#endif
+
+static paddr_t ocram_tz_start_addr = -1UL;
+
+paddr_t imx_get_ocram_tz_start_addr(void)
+{
+	if (ocram_tz_start_addr != -1UL)
+		return ocram_tz_start_addr;
+	else
+		return TRUSTZONE_OCRAM_START;
+}
+
+#ifdef CFG_DT
+/*
+ * List of nodes to include in the OCRAM TZ space.
+ * The lowest node in the OCRAM will define the ocram_tz_start_addr.
+ * The lowest node must have a 4k aligned address.
+ * If this list is empty, imx_get_ocram_tz_start_addr() will return the
+ * default TRUSTZONE_OCRAM_START.
+ */
+static const char * const tz_ocram_match[] = {
+	"fsl,optee-lpm-sram",
+	NULL,
+};
+
+/*
+ * Find the lowest address among nodes listed in tz_ocram_match[]. The lowest
+ * address will define the start address of the Trustzone protected ocram space.
+ * If something goes wrong during dtb parsing, the system panics.
+ * If tz_ocram_match[] is empty, we return -1UL. In that case,
+ * imx_get_ocram_tz_start_addr() will return TRUSTZONE_OCRAM_START by default.
+ */
+static void dt_find_ocram_tz_addr(void)
+{
+	void *fdt;
+	int offset;
+	paddr_t start_addr = -1UL;
+	paddr_t tmp_addr = 0;
+	uint32_t idx = 0;
+
+	/* Get device tree blob */
+	fdt = get_dt();
+	if (fdt == NULL)
+		panic("No DTB found");
+
+	while (tz_ocram_match[idx] != NULL) {
+		/* Get node */
+		offset = fdt_node_offset_by_compatible(fdt, 0,
+							tz_ocram_match[idx]);
+		if (offset < 0) {
+			EMSG("Cannot find %s node in the device tree",
+							tz_ocram_match[idx]);
+			panic();
+		}
+
+		/* Get address in "reg" property */
+		tmp_addr = fdt_reg_base_address(fdt, offset);
+		if (!tmp_addr) {
+			EMSG("Cannot get reg property of %s",
+							tz_ocram_match[idx]);
+			panic();
+		}
+
+		/* Addresses must be 4 Kbytes aligned to be TZ protected */
+		if (tmp_addr & 0xFFF) {
+			EMSG("%s address is not 4Kbytes aligned",
+							tz_ocram_match[idx]);
+			panic();
+		}
+
+		/*
+		 * Get the lowest address among nodes to protect listed in
+		 * tz_ocram_match[].
+		 */
+		if ((tmp_addr < start_addr) || (start_addr == -1UL))
+			start_addr = tmp_addr;
+
+		idx++;
+	}
+	ocram_tz_start_addr = start_addr;
+}
+#endif /* CFG_DT */
+#endif /* CFG_MX6 || CFG_MX7 */
+
+#ifdef CFG_MX6
+static const paddr_t phys_addr_imx6ull[] = {
+	AIPS1_BASE, AIPS2_BASE, AIPS3_BASE, 0
+};
+
+static const paddr_t phys_addr_imx6ul[] = {
+	AIPS1_BASE, AIPS2_BASE, 0
+};
+
+static const paddr_t phys_addr_imx6sl[] = {
+	PL310_BASE, AIPS1_BASE, AIPS2_BASE, 0
+};
+
+static const paddr_t phys_addr_imx6[] = {
+	PL310_BASE, AIPS1_BASE, AIPS2_BASE, AIPS3_BASE, 0
+};
+
+static void init_tz_ocram(void)
+{
+	/* Configure the Secure OCRAM granularity */
+	vaddr_t  iomux_base;
+	uint32_t val = 0;
+	uint32_t lock = 0;
+	uint32_t lock_val = 0;
+
+#ifdef CFG_DT
+	dt_find_ocram_tz_addr();
+#endif
+
+	if (soc_is_imx6ul() || soc_is_imx6ull() ||
+		soc_is_imx6sx() || soc_is_imx6sll()) {
+		iomux_base = core_mmu_get_va(IOMUXC_GPR_BASE, MEM_AREA_IO_SEC,
+					     IOMUXC_SIZE);
+	} else {
+		iomux_base = core_mmu_get_va(IOMUXC_BASE, MEM_AREA_IO_SEC,
+					     IOMUXC_SIZE);
+	}
+
+	/* Read the current value */
+	val = io_read32(iomux_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID));
+
+	/* Calculate the OCRAM split configuration */
+	if (soc_is_imx6ul() || soc_is_imx6ull() ||
+		soc_is_imx6sl() || soc_is_imx6sll()) {
+		val &= ~BM_IOMUX_GPR_OCRAM_TZ_ADDR_6UL;
+		/* Address is 4 Kbytes granularity */
+		val |= (((imx_get_ocram_tz_start_addr() >> 12) <<
+				BP_IOMUX_GPR_OCRAM_TZ_ADDR_6UL) &
+				BM_IOMUX_GPR_OCRAM_TZ_ADDR_6UL);
+		/* Enable */
+		val |= IOMUX_GPR_OCRAM_TZ_ENABLE_6UL;
+		lock = BM_IOMUX_GPR_OCRAM_TZ_ADDR_6UL |
+			IOMUX_GPR_OCRAM_TZ_ENABLE_6UL;
+	} else if (soc_is_imx6sx()) {
+		/*
+		 * For i.MX6SX, the S_OCRAM is used, so there is no need to
+		 * lock the configuration. Just ensure that the OCRAM is
+		 * fully released.
+		 */
+		val &= ~BM_IOMUX_GPR_OCRAM_TZ_ADDR_6SX;
+		val &= ~IOMUX_GPR_OCRAM_TZ_ENABLE_6SX;
+	} else if (soc_is_imx6sdl()) {
+		val &= ~BM_IOMUX_GPR_OCRAM_TZ_ADDR_6DL;
+		/* Address is 4 Kbytes granularity */
+		val |= (((imx_get_ocram_tz_start_addr() >> 12) <<
+				BP_IOMUX_GPR_OCRAM_TZ_ADDR_6DL) &
+				BM_IOMUX_GPR_OCRAM_TZ_ADDR_6DL);
+		/* Enable */
+		val |= IOMUX_GPR_OCRAM_TZ_ENABLE_6DL;
+		lock = BM_IOMUX_GPR_OCRAM_TZ_ADDR_6DL |
+			IOMUX_GPR_OCRAM_TZ_ENABLE_6SX;
+	} else {
+		val &= ~BM_IOMUX_GPR_OCRAM_TZ_ADDR;
+		/* Address is 4 Kbytes granularity */
+		val |= (((imx_get_ocram_tz_start_addr() >> 12) <<
+				BP_IOMUX_GPR_OCRAM_TZ_ADDR) &
+				BM_IOMUX_GPR_OCRAM_TZ_ADDR);
+		/* Enable */
+		val |= IOMUX_GPR_OCRAM_TZ_ENABLE;
+		lock = BM_IOMUX_GPR_OCRAM_TZ_ADDR |
+			IOMUX_GPR_OCRAM_TZ_ENABLE;
+	}
+
+	/* Write the configuration */
+	io_write32(iomux_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID), val);
+
+	/* Then lock configuration */
+	/* Normally the lock bits are not defined for 6UL and 6SX */
+	io_write32(iomux_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID),
+		IOMUX_GPR_OCRAM_LOCK(lock) | val);
+
+	/*
+	 * Ensure that the locked GPR registers for OCRAM TZ protection
+	 * match the current configuration.
+	 */
+	if (!soc_is_imx6ul() && !soc_is_imx6sx()) {
+		lock_val = io_read32(iomux_base
+				+ IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID));
+		if ((lock_val & lock) != (val & lock))
+			panic("OCRAM TZ Configuration Lock Mismatch");
+	}
+
+	if (soc_is_imx6sx()) {
+		val = io_read32(iomux_base
+				+ IOMUX_GPRx_OFFSET(IOMUX_GPR_S_OCRAM_ID));
+
+		val &= ~BM_IOMUX_GPR_S_OCRAM_TZ_ADDR_6SX;
+		/* Address is 4 Kbytes granularity */
+		val |= (((imx_get_ocram_tz_start_addr() >> 12) <<
+				BP_IOMUX_GPR_S_OCRAM_TZ_ADDR_6SX) &
+				BM_IOMUX_GPR_S_OCRAM_TZ_ADDR_6SX);
+		/* Enable */
+		val |= IOMUX_GPR_S_OCRAM_TZ_ENABLE_6SX;
+
+		io_write32(iomux_base
+				+ IOMUX_GPRx_OFFSET(IOMUX_GPR_S_OCRAM_ID), val);
+
+		/* Lock the OCRAM_S */
+		lock = BM_IOMUX_GPR_OCRAM_TZ_ADDR_6UL |
+		       IOMUX_GPR_OCRAM_TZ_ENABLE_6UL;
+		io_setbits32(iomux_base +
+				IOMUX_GPRx_OFFSET(IOMUX_GPR_S_OCRAM_ID),
+			     IOMUX_GPR_OCRAM_LOCK(lock));
+	}
+}
+
+static TEE_Result init_ocram(void)
+{
+	struct tee_mmap_region map;
+	const paddr_t *phys_addr;
+	paddr_t       iram_base;
+	void          *iram_tlb_vaddr;
+
+	DMSG("IRAM TLB phys addr = 0x%X", (uint32_t)iram_tlb_phys_addr);
+
+	/* iram tlb already initialized */
+	if (iram_tlb_phys_addr != (-1UL))
+		return TEE_SUCCESS;
+
+	/* Initialize the Secure OCRAM */
+	init_tz_ocram();
+
+#ifdef CFG_DT
+	/* Move ocram nodes to the OCRAM for Linux */
+	dt_overwrite("fsl,optee-lpm-sram", "reg", "overw_reg", 3);
+
+	if (soc_is_imx6sx())
+		dt_overwrite("fsl,optee-lpm-sram", "clocks", "overw_clock", 3);
+#endif
+
+	if (soc_is_imx6sx()) {
+		iram_base = ROUNDDOWN(IRAM_6SX_S_BASE, CORE_MMU_PGDIR_SIZE);
+		/* First we need to map the Secure RAM */
+		if (!core_mmu_add_mapping(MEM_AREA_TEE_COHERENT, iram_base,
+							IRAM_6SX_S_SIZE)) {
+			panic("Failed to map Secure OCRAM");
+		}
+	} else {
+		iram_base = IRAM_BASE;
+	}
+
+	if (soc_is_imx6sx())
+		iram_tlb_phys_addr = imx_get_ocram_tz_start_addr();
+	else
+		iram_tlb_phys_addr = imx_get_ocram_tz_start_addr()
+							+ IRAM_TBL_OFFSET;
+
+	iram_tlb_vaddr = phys_to_virt(iram_tlb_phys_addr,
+			MEM_AREA_TEE_COHERENT,
+			16 * 1024);
+	if (!iram_tlb_vaddr) {
+		panic("Failed to map Trustzone OCRAM");
+	}
+
+	/* Clear the 16KB IRAM page table */
+	memset(iram_tlb_vaddr, 0, 16 * 1024);
+
+	/* Get the array of the area to be mapped */
+	if (soc_is_imx6ull()) {
+		phys_addr = phys_addr_imx6ull;
+	} else if (soc_is_imx6ul()) {
+		phys_addr = phys_addr_imx6ul;
+	} else if (soc_is_imx6sl()) {
+		phys_addr = phys_addr_imx6sl;
+	} else {
+		phys_addr = phys_addr_imx6;
+	}
+
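+	/*
+	 * Map each peripheral block as device-memory sections in the
+	 * IRAM page table.
+	 */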
+	do {
+		map.pa = ROUNDDOWN(*phys_addr, CORE_MMU_PGDIR_SIZE);
+		map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_IO_SEC,
+					       CORE_MMU_PGDIR_SIZE);
+		map.region_size = CORE_MMU_PGDIR_SIZE;
+		map.size = CORE_MMU_PGDIR_SIZE;
+		map.type = MEM_AREA_IO_SEC;
+		map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
+				TEE_MATTR_GLOBAL | TEE_MATTR_SECURE |
+				(TEE_MATTR_MEM_TYPE_DEV <<
+					TEE_MATTR_MEM_TYPE_SHIFT);
+		map_memarea_sections(&map, (uint32_t *)iram_tlb_vaddr);
+	} while (*(++phys_addr));
+
+	map.pa = iram_base;
+	map.va = (vaddr_t)phys_to_virt(iram_base, MEM_AREA_TEE_COHERENT,
+				       CORE_MMU_PGDIR_SIZE);
+	map.region_size = CORE_MMU_PGDIR_SIZE;
+	map.size = CORE_MMU_PGDIR_SIZE;
+	map.type = MEM_AREA_TEE_COHERENT;
+	map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
+				TEE_MATTR_GLOBAL | TEE_MATTR_SECURE |
+				TEE_MATTR_PX;
+	map_memarea_sections(&map, (uint32_t *)iram_tlb_vaddr);
+
+	/*
+	 * Note: the DRAM space is not mapped. DRAM is in auto
+	 * self-refresh; if it were mapped, the MMU could access DRAM
+	 * and hang the system.
+	 */
+
+	return TEE_SUCCESS;
+}
+#elif defined(CFG_MX7) || defined(CFG_MX7ULP)
+
+#ifdef CFG_MX7
+static const paddr_t phys_addr_imx7[] = {
+	AIPS1_BASE, AIPS2_BASE, AIPS3_BASE, 0
+};
+#endif
+#ifdef CFG_MX7ULP
+static const paddr_t phys_addr_imx7ulp[] = {
+	AIPS0_BASE, AIPS1_BASE, 0
+};
+#endif
+
+static void init_tz_ocram(void)
+{
+#ifndef CFG_MX7ULP
+	/* Configure the Secure OCRAM granularity */
+	vaddr_t  iomux_base;
+	uint32_t val;
+	uint32_t lock;
+	uint32_t lock_val;
+
+#ifdef CFG_DT
+	/* Get low tz ocram address */
+	dt_find_ocram_tz_addr();
+#endif
+
+	iomux_base = (vaddr_t)phys_to_virt(IOMUXC_GPR_BASE, MEM_AREA_IO_SEC,
+					   IOMUXC_SIZE);
+
+	val = io_read32(iomux_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID));
+
+	/* Configure the OCRAM Retention to start at offset 0 */
+	val &= ~BM_IOMUX_GPR_OCRAM_S_TZ_ADDR;
+	/* Address is 4 Kbytes granularity */
+	val |= (((imx_get_ocram_tz_start_addr() >> 12) <<
+			BP_IOMUX_GPR_OCRAM_S_TZ_ADDR) &
+			BM_IOMUX_GPR_OCRAM_S_TZ_ADDR);
+	val |= IOMUX_GPR_OCRAM_S_TZ_ENABLE;
+
+	lock = BM_IOMUX_GPR_OCRAM_S_TZ_ADDR | IOMUX_GPR_OCRAM_S_TZ_ENABLE;
+
+	/* Check if GPR registers for OCRAM TZ protection are locked */
+	if (IOMUX_GPR_OCRAM_LOCK(lock) & val) {
+		EMSG("GPR Registers for OCRAM TZ Configuration locked");
+		panic();
+	}
+
+	io_write32(iomux_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID), val);
+
+	/* Then lock configuration */
+	io_write32(iomux_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID),
+				IOMUX_GPR_OCRAM_LOCK(lock) | val);
+
+	/*
+	 * Ensure that the locked GPR registers for OCRAM TZ protection
+	 * match the current configuration.
+	 */
+	lock_val =
+		io_read32(iomux_base + IOMUX_GPRx_OFFSET(IOMUX_GPR_OCRAM_ID));
+	if ((lock_val & lock) != (val & lock))
+		panic("OCRAM TZ Configuration Lock Mismatch");
+#endif
+}
+
+static TEE_Result init_ocram(void)
+{
+	struct tee_mmap_region map;
+	const paddr_t *phys_addr;
+	size_t size_area;
+	void *iram_tlb_vaddr;
+
+	DMSG("IRAM TLB phys addr = 0x%X", (uint32_t)iram_tlb_phys_addr);
+
+	/* iram tlb already initialized */
+	if (iram_tlb_phys_addr != (-1UL))
+		return TEE_SUCCESS;
+
+	/* Initialize the Secure OCRAM */
+	init_tz_ocram();
+
+#ifdef CFG_DT
+	/* Move ocram nodes to the OCRAM for Linux */
+	if (soc_is_imx7ds()) {
+		dt_overwrite("fsl,optee-lpm-sram", "reg", "overw_reg", 3);
+		dt_overwrite("fsl,optee-lpm-sram", "clocks", "overw_clock", 3);
+	}
+#endif
+
+#ifdef CFG_MX7
+	iram_tlb_phys_addr = imx_get_ocram_tz_start_addr() + IRAM_TBL_OFFSET;
+	phys_addr = phys_addr_imx7;
+	size_area = AIPS1_SIZE; /* 4M for AIPS1/2/3 */
+#endif
+#ifdef CFG_MX7ULP
+	iram_tlb_phys_addr = LP_OCRAM_START;
+	phys_addr = phys_addr_imx7ulp;
+	size_area = AIPS1_SIZE; /* 8M for AIPS0/1 */
+#endif
+
+	iram_tlb_vaddr = phys_to_virt(iram_tlb_phys_addr,
+				      MEM_AREA_TEE_COHERENT, 16 * 1024);
+	if (!iram_tlb_vaddr)
+		panic("Failed to map Trustzone OCRAM");
+
+	DMSG("IRAM TLB pa 0x%x va 0x%x", (uint32_t)iram_tlb_phys_addr,
+	     (uint32_t)iram_tlb_vaddr);
+
+	/* Clear the 16KB IRAM page table */
+	memset(iram_tlb_vaddr, 0, 16 * 1024);
+
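+	/*
+	 * Map each peripheral block as device-memory sections in the
+	 * IRAM page table.
+	 */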
+	do {
+		map.pa = *phys_addr;
+		map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_IO_SEC,
+					       CORE_MMU_PGDIR_SIZE);
+		map.region_size = CORE_MMU_PGDIR_SIZE;
+		map.size = size_area;
+		map.type = MEM_AREA_IO_SEC;
+		map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
+				TEE_MATTR_GLOBAL | TEE_MATTR_SECURE |
+				(TEE_MATTR_MEM_TYPE_DEV <<
+					TEE_MATTR_MEM_TYPE_SHIFT);
+		map_memarea_sections(&map, (uint32_t *)iram_tlb_vaddr);
+	} while (*(++phys_addr));
+
+#ifdef CFG_MX7
+	/* Note IRAM_S_BASE is not 1M aligned, so take care */
+	map.pa = ROUNDDOWN(IRAM_S_BASE, CORE_MMU_PGDIR_SIZE);
+	map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_TEE_COHERENT,
+				       CORE_MMU_PGDIR_SIZE);
+	map.region_size = CORE_MMU_PGDIR_SIZE;
+	map.size = CORE_MMU_PGDIR_SIZE;
+	map.type = MEM_AREA_TEE_COHERENT;
+	map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL |
+				TEE_MATTR_SECURE | TEE_MATTR_PX;
+	map_memarea_sections(&map, (uint32_t *)iram_tlb_vaddr);
+
+	map.pa = GIC_BASE;
+	map.va = (vaddr_t)phys_to_virt((paddr_t)GIC_BASE, MEM_AREA_IO_SEC,
+				       GIC_SIZE);
+	map.region_size = CORE_MMU_PGDIR_SIZE;
+	map.size = CORE_MMU_PGDIR_SIZE;
+	map.type = MEM_AREA_TEE_COHERENT;
+	map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL |
+				TEE_MATTR_SECURE | TEE_MATTR_PX;
+	map_memarea_sections(&map, (uint32_t *)iram_tlb_vaddr);
+
+	/*
+	 * Note: the DRAM space is not mapped. DRAM is in auto
+	 * self-refresh; if it were mapped, the MMU could access DRAM
+	 * and hang the system.
+	 */
+#endif
+#ifdef CFG_MX7ULP
+	map.pa = M4_AIPS_BASE;
+	map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_IO_SEC, 0x100000);
+	map.region_size = 0x100000;
+	map.size = 0x100000;
+	map.type = MEM_AREA_TEE_COHERENT;
+	map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL |
+				 TEE_MATTR_SECURE;
+	map_memarea_sections(&map, (uint32_t *)iram_tlb_vaddr);
+
+	map.pa = ROUNDDOWN(IRAM_BASE, 0x100000);
+	map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_TEE_COHERENT, 0x100000);
+	map.region_size = 0x100000;
+	map.size = 0x100000;
+	map.type = MEM_AREA_TEE_COHERENT;
+	map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL |
+				TEE_MATTR_SECURE | TEE_MATTR_PX;
+	map_memarea_sections(&map, (uint32_t *)iram_tlb_vaddr);
+
+	/*
+	 * There is no need to give the GIC a standalone entry because the
+	 * AIPS0 mapping already covers the GIC space. If it did not,
+	 * map_memarea would panic.
+	 *
+	 * Note: the DRAM space is not mapped. DRAM is in auto
+	 * self-refresh; if it were mapped, the MMU could access DRAM
+	 * and hang the system.
+	 */
+#endif
+	return TEE_SUCCESS;
+}
+#else
+static TEE_Result init_ocram(void)
+{
+	return TEE_SUCCESS;
+}
+#endif
+
+/*
+ * The OCRAM initialization code must run before the PSCI power management
+ * code. Otherwise, the TLB initialization would wipe the power management
+ * functions from the OCRAM memory space.
+ */
+early_init(init_ocram);
diff --git a/core/drivers/pm/imx/imx_pm.h b/core/drivers/pm/imx/imx_pm.h
new file mode 100644
index 000000000..f72cce5c7
--- /dev/null
+++ b/core/drivers/pm/imx/imx_pm.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2019 NXP
+ */
+
+#ifndef __IMX_PM_H
+#define __IMX_PM_H
+
+#include <stdint.h>
+
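+/*
+ * Layout of the TrustZone OCRAM area: the suspend code, the low power
+ * idle code and the busfreq code are copied at the offsets below; the
+ * IRAM page table is placed at IRAM_TBL_OFFSET.
+ */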
+#define SUSPEND_OCRAM_SIZE 0xE00
+#define LOWPOWER_IDLE_OCRAM_SIZE 0x1000
+
+#define SUSPEND_OCRAM_OFFSET 0x0
+#define LOWPOWER_IDLE_OCRAM_OFFSET 0xE00
+
+#define BUSFREQ_OCRAM_OFFSET \
+	(LOWPOWER_IDLE_OCRAM_OFFSET + LOWPOWER_IDLE_OCRAM_SIZE)
+#define BUSFREQ_MAX_SIZE (IRAM_TBL_OFFSET - BUSFREQ_OCRAM_OFFSET)
+
+#define PM_CORE_LEVEL    0
+#define PM_CLUSTER_LEVEL 1
+#define PM_SYSTEM_LEVEL  2
+
+#define SRC_GPR1			0x020
+#define SRC_GPR2			0x024
+
+#define SRC_A7RCR1			0x008
+#define SRC_A7RCR1_A7_CORE1_ENABLE_OFFSET	1
+
+/*
+ * Except for i.MX6SX, where only 16KB of OCRAM_S is available, all SoCs
+ * place the IRAM page table at this 16KB offset.
+ */
+#define IRAM_TBL_OFFSET			0x4000
+
+#if defined(CFG_MX6UL) || defined(CFG_MX6ULL)
+/* 128K OCRAM */
+#define TRUSTZONE_OCRAM_START		0x918000
+#elif defined(CFG_MX6DL)
+#define TRUSTZONE_OCRAM_START		0x918000
+#elif defined(CFG_MX6QP)
+#define TRUSTZONE_OCRAM_START		0x938000
+#elif defined(CFG_MX6SX)
+#define TRUSTZONE_OCRAM_START		0x8f8000
+#elif defined(CFG_MX6SL)
+#define TRUSTZONE_OCRAM_START		0x918000
+#elif defined(CFG_MX6SLL)
+#define TRUSTZONE_OCRAM_START		0x918000
+#elif defined(CFG_MX7)
+#define TRUSTZONE_OCRAM_START		0x180000
+#else
+/* 256K OCRAM */
+#define TRUSTZONE_OCRAM_START		0x938000
+#endif
+
+#ifndef __ASSEMBLER__
+#include <sm/sm.h>
+
+/**
+ * @brief   Definition of the structure given as first parameter to the
+ *          sm_pm_cpu_suspend assembly function. This parameter is the
+ *          argument of the device's power assembly function.
+ */
+struct imx_pm_asm_arg {
+	paddr_t pa_addr;  ///< Physical address of the pm block
+	void    *pm_info; ///< Reference to the pm_info structure
+};
+
+extern int (*suspend_func)(uint32_t);
+extern struct imx_pm_asm_arg suspend_arg;
+extern vaddr_t pm_ocram_free_area;
+
+/* This structure will be used for suspend/resume and low power idle */
+#define MX6_MAX_MMDC_IO_NUM		36
+#define MX6_MAX_MMDC_NUM		36
+struct imx6_pm_info {
+	paddr_t		pa_base;	/* pa of pm_info */
+	paddr_t		tee_resume;
+	uint32_t	ddr_type;
+	uint32_t	pm_info_size;
+	paddr_t		mmdc0_pa_base;
+	vaddr_t		mmdc0_va_base;
+	paddr_t		mmdc1_pa_base;
+	vaddr_t		mmdc1_va_base;
+	paddr_t		src_pa_base;
+	vaddr_t		src_va_base;
+	paddr_t		iomuxc_pa_base;
+	vaddr_t		iomuxc_va_base;
+	paddr_t		ccm_pa_base;
+	vaddr_t		ccm_va_base;
+	paddr_t		gpc_pa_base;
+	vaddr_t		gpc_va_base;
+	vaddr_t		pl310_va_base;
+	paddr_t		anatop_pa_base;
+	vaddr_t		anatop_va_base;
+/* i.mx6sx */
+	paddr_t		sema4_pa_base;
+	vaddr_t		sema4_va_base;
+	uint32_t	ttbr0;
+	uint32_t	ttbr1;
+	uint32_t	diagnostic;
+	uint32_t	idle_state;
+	uint32_t	mmdc_io_num;
+	uint32_t	mmdc_io_val[MX6_MAX_MMDC_IO_NUM][3];
+	uint32_t	mmdc_num;
+	uint32_t	mmdc_val[MX6_MAX_MMDC_NUM][2];
+} __aligned(8);
+
+struct imx6_pm_data {
+	uint32_t   ddr_type;
+	uint32_t   mmdc_io_num;
+	const void *mmdc_io_offset;
+	uint32_t   mmdc_num;
+	const void *mmdc_offset;
+};
+
+/* The structure is used for suspend and low power idle */
+#define MX7_DDRC_NUM			32
+
+struct imx7_pm_info {
+	uint32_t	m4_reserve0;
+	uint32_t	m4_reserve1;
+	uint32_t	m4_reserve2;
+	vaddr_t		va_base;	/* va of pm_info */
+	paddr_t		pa_base;	/* pa of pm_info */
+	uintptr_t	entry;
+	paddr_t		tee_resume;
+	uint32_t	ddr_type;
+	uint32_t	pm_info_size;
+	paddr_t		ddrc_pa_base;
+	vaddr_t		ddrc_va_base;
+	paddr_t		ddrc_phy_pa_base;
+	vaddr_t		ddrc_phy_va_base;
+	paddr_t		src_pa_base;
+	vaddr_t		src_va_base;
+	paddr_t		iomuxc_gpr_pa_base;
+	vaddr_t		iomuxc_gpr_va_base;
+	paddr_t		ccm_pa_base;
+	vaddr_t		ccm_va_base;
+	paddr_t		gpc_pa_base;
+	vaddr_t		gpc_va_base;
+	paddr_t		snvs_pa_base;
+	vaddr_t		snvs_va_base;
+	paddr_t		anatop_pa_base;
+	vaddr_t		anatop_va_base;
+	paddr_t		lpsr_pa_base;
+	vaddr_t		lpsr_va_base;
+	paddr_t		gic_pa_base;
+	vaddr_t		gic_va_base;
+	uint32_t	ttbr0;
+	uint32_t	ttbr1;
+	uint32_t	num_online_cpus;
+	uint32_t	num_lpi_cpus;
+	uint32_t	val;
+	uint32_t	flag0;
+	uint32_t	flag1;
+	uint32_t	ddrc_num;
+	uint32_t	ddrc_val[MX7_DDRC_NUM][2];
+	uint32_t	ddrc_phy_num;
+	uint32_t	ddrc_phy_val[MX7_DDRC_NUM][2];
+} __aligned(8);
+
+#define MX7ULP_MAX_IOMUX_NUM		116
+#define MX7ULP_MAX_SELECT_INPUT_NUM	78
+#define MX7ULP_MAX_MMDC_IO_NUM		36
+#define MX7ULP_MAX_MMDC_NUM		50
+struct imx7ulp_pm_info {
+	uint32_t m4_reserve0;
+	uint32_t m4_reserve1;
+	uint32_t m4_reserve2;
+	paddr_t pbase; /* The physical address of pm_info. */
+	paddr_t resume_addr; /* The physical resume address for asm code */
+	uint32_t pm_info_size; /* Size of pm_info. */
+	vaddr_t sim_base;
+	vaddr_t scg1_base;
+	vaddr_t mmdc_base;
+	vaddr_t mmdc_io_base;
+	vaddr_t smc1_base;
+	uint32_t scg1[17];
+	uint32_t ttbr0;
+	uint32_t ttbr1;
+	uint32_t gpio[4][2];
+	uint32_t iomux_num;
+	uint32_t iomux_val[MX7ULP_MAX_IOMUX_NUM];
+	uint32_t select_input_num;
+	uint32_t select_input_val[MX7ULP_MAX_SELECT_INPUT_NUM];
+	uint32_t mmdc_io_num;
+	uint32_t mmdc_io_val[MX7ULP_MAX_MMDC_IO_NUM][2];
+	uint32_t mmdc_num;
+	uint32_t mmdc_val[MX7ULP_MAX_MMDC_NUM][2];
+} __aligned(8);
+
+struct imx7ulp_pm_data {
+	uint32_t ddr_type;
+	uint32_t mmdc_io_num;
+	uint32_t *mmdc_io_offset;
+	uint32_t mmdc_num;
+	uint32_t *mmdc_offset;
+};
+
+struct imx7_pm_data {
+	uint32_t ddr_type;
+	uint32_t ddrc_num;
+	uint32_t (*ddrc_offset)[2];
+	uint32_t ddrc_phy_num;
+	uint32_t (*ddrc_phy_offset)[2];
+};
+
+extern struct imx6_pm_data imx6ul_pm_data;
+extern struct imx6_pm_data imx6sl_pm_data;
+extern struct imx6_pm_data imx6sll_pm_data;
+extern struct imx6_pm_data imx6sx_pm_data;
+
+/* IMX6 Power initialization functions */
+int imx6_suspend_init(void);
+int imx6sx_cpuidle_init(void);
+int imx6ul_cpuidle_init(void);
+int imx6sl_cpuidle_init(void);
+int imx6sll_cpuidle_init(void);
+
+/* Low Power assembly functions */
+void imx6_suspend(struct imx6_pm_info *info);
+void imx6ul_low_power_idle(struct imx6_pm_info *info);
+void imx6ull_low_power_idle(struct imx6_pm_info *info);
+void imx6sx_low_power_idle(struct imx6_pm_info *info);
+void imx6sl_low_power_idle(struct imx6_pm_info *info);
+void imx6sll_low_power_idle(struct imx6_pm_info *info);
+void imx6_resume(void);
+void v7_cpu_resume(void);
+
+uint32_t get_imx6sx_low_power_idle_size(void);
+uint32_t get_imx6_suspend_size(void);
+
+int imx6ul_lowpower_idle(uint32_t power_state, uintptr_t entry,
+			 uint32_t context_id, struct sm_nsec_ctx *nsec);
+int imx6sx_lowpower_idle(uint32_t power_state, uintptr_t entry,
+			 uint32_t context_id, struct sm_nsec_ctx *nsec);
+int imx6sll_lowpower_idle(uint32_t power_state, uintptr_t entry,
+			  uint32_t context_id, struct sm_nsec_ctx *nsec);
+int imx6sl_lowpower_idle(uint32_t power_state, uintptr_t entry,
+			 uint32_t context_id, struct sm_nsec_ctx *nsec);
+int imx6_cpu_suspend(uint32_t power_state, uintptr_t entry, uint32_t context_id,
+		     struct sm_nsec_ctx *nsec);
+
+/* IMX7 Power Initialization functions */
+int imx7_suspend_init(void);
+int imx7ulp_suspend_init(void);
+int imx7d_cpuidle_init(void);
+
+void imx7_suspend(struct imx7_pm_info *info);
+void imx7_resume(void);
+void ca7_cpu_resume(void);
+
+int imx7_cpu_suspend(uint32_t power_state, uintptr_t entry, uint32_t context_id,
+		     struct sm_nsec_ctx *nsec);
+int imx7d_lowpower_idle(uint32_t power_state, uintptr_t entry,
+			uint32_t context_id, struct sm_nsec_ctx *nsec);
+void imx7d_low_power_idle(struct imx7_pm_info *info);
+
+int imx7ulp_cpu_suspend(uint32_t power_state, uintptr_t entry,
+			uint32_t context_id, struct sm_nsec_ctx *nsec);
+void imx7ulp_suspend(struct imx7ulp_pm_info *info);
+void imx7ulp_cpu_resume(void);
+
+enum imx7ulp_sys_pwr_mode {
+	HSRUN,
+	RUN,
+	VLPR,
+	WAIT,
+	VLPW,
+	STOP,
+	VLPS,
+	VLLS,
+};
+
+/* IMX8 pm handlers */
+unsigned long cpu_resume_handler(unsigned long a0, unsigned long a1);
+unsigned long cpu_suspend_handler(unsigned long a0, unsigned long a1);
+
+paddr_t imx_get_ocram_tz_start_addr(void);
+
+#ifdef CFG_PSCI_ARM32
+inline void plat_cpu_wakeup_late(void) {}
+#endif
+
+int imx_get_ddr_type(void);
+
+#endif /* __ASSEMBLER__ */
+#endif /* __IMX_PM_H */
diff --git a/core/drivers/pm/imx/imx_pm_asm_defines.c b/core/drivers/pm/imx/imx_pm_asm_defines.c
new file mode 100644
index 000000000..5672013cd
--- /dev/null
+++ b/core/drivers/pm/imx/imx_pm_asm_defines.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2018, Linaro Limited
+ * Copyright 2018 NXP
+ */
+
+#include <gen-asm-defines.h>
+#include "imx_pm.h"
+
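+/*
+ * Generate the offset constants (PM_ASM_ARG_*, PM_INFO_*) used by the
+ * low power assembly code.
+ */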
+DEFINES
+{
+	/* Definition of the Assembly function arguments */
+	DEFINE(PM_ASM_ARG_PA_ADDR_OFF,
+	       offsetof(struct imx_pm_asm_arg, pa_addr));
+	DEFINE(PM_ASM_ARG_PM_INFO_OFF,
+	       offsetof(struct imx_pm_asm_arg, pm_info));
+
+	/* Definition of the MX7 PM info structure offset */
+	DEFINE(PM_INFO_MX7_M4_RESERVE0_OFF,
+	       offsetof(struct imx7_pm_info, m4_reserve0));
+	DEFINE(PM_INFO_MX7_M4_RESERVE1_OFF,
+	       offsetof(struct imx7_pm_info, m4_reserve1));
+	DEFINE(PM_INFO_MX7_M4_RESERVE2_OFF,
+	       offsetof(struct imx7_pm_info, m4_reserve2));
+	DEFINE(PM_INFO_MX7_VBASE_OFF, offsetof(struct imx7_pm_info, va_base));
+	DEFINE(PM_INFO_MX7_PBASE_OFF, offsetof(struct imx7_pm_info, pa_base));
+	DEFINE(PM_INFO_MX7_ENTRY_OFF, offsetof(struct imx7_pm_info, entry));
+	DEFINE(PM_INFO_MX7_RESUME_ADDR_OFF,
+	       offsetof(struct imx7_pm_info, tee_resume));
+	DEFINE(PM_INFO_MX7_DDR_TYPE_OFF,
+	       offsetof(struct imx7_pm_info, ddr_type));
+	DEFINE(PM_INFO_MX7_SIZE_OFF,
+	       offsetof(struct imx7_pm_info, pm_info_size));
+	DEFINE(PM_INFO_MX7_DDRC_P_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_pa_base));
+	DEFINE(PM_INFO_MX7_DDRC_V_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_va_base));
+	DEFINE(PM_INFO_MX7_DDRC_PHY_P_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_phy_pa_base));
+	DEFINE(PM_INFO_MX7_DDRC_PHY_V_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_phy_va_base));
+	DEFINE(PM_INFO_MX7_SRC_P_OFF,
+	       offsetof(struct imx7_pm_info, src_pa_base));
+	DEFINE(PM_INFO_MX7_SRC_V_OFF,
+	       offsetof(struct imx7_pm_info, src_va_base));
+	DEFINE(PM_INFO_MX7_IOMUXC_GPR_P_OFF,
+	       offsetof(struct imx7_pm_info, iomuxc_gpr_pa_base));
+	DEFINE(PM_INFO_MX7_IOMUXC_GPR_V_OFF,
+	       offsetof(struct imx7_pm_info, iomuxc_gpr_va_base));
+	DEFINE(PM_INFO_MX7_CCM_P_OFF,
+	       offsetof(struct imx7_pm_info, ccm_pa_base));
+	DEFINE(PM_INFO_MX7_CCM_V_OFF,
+	       offsetof(struct imx7_pm_info, ccm_va_base));
+	DEFINE(PM_INFO_MX7_GPC_P_OFF,
+	       offsetof(struct imx7_pm_info, gpc_pa_base));
+	DEFINE(PM_INFO_MX7_GPC_V_OFF,
+	       offsetof(struct imx7_pm_info, gpc_va_base));
+	DEFINE(PM_INFO_MX7_SNVS_P_OFF,
+	       offsetof(struct imx7_pm_info, snvs_pa_base));
+	DEFINE(PM_INFO_MX7_SNVS_V_OFF,
+	       offsetof(struct imx7_pm_info, snvs_va_base));
+	DEFINE(PM_INFO_MX7_ANATOP_P_OFF,
+	       offsetof(struct imx7_pm_info, anatop_pa_base));
+	DEFINE(PM_INFO_MX7_ANATOP_V_OFF,
+	       offsetof(struct imx7_pm_info, anatop_va_base));
+	DEFINE(PM_INFO_MX7_LPSR_P_OFF,
+	       offsetof(struct imx7_pm_info, lpsr_pa_base));
+	DEFINE(PM_INFO_MX7_LPSR_V_OFF,
+	       offsetof(struct imx7_pm_info, lpsr_va_base));
+	DEFINE(PM_INFO_MX7_GIC_DIST_P_OFF,
+	       offsetof(struct imx7_pm_info, gic_pa_base));
+	DEFINE(PM_INFO_MX7_GIC_DIST_V_OFF,
+	       offsetof(struct imx7_pm_info, gic_va_base));
+	DEFINE(PM_INFO_MX7_TTBR0_OFF, offsetof(struct imx7_pm_info, ttbr0));
+	DEFINE(PM_INFO_MX7_TTBR1_OFF, offsetof(struct imx7_pm_info, ttbr1));
+	DEFINE(PM_INFO_MX7_NUM_ONLINE_CPUS_OFF,
+	       offsetof(struct imx7_pm_info, num_online_cpus));
+	DEFINE(PM_INFO_MX7_NUM_LPI_CPUS_OFF,
+	       offsetof(struct imx7_pm_info, num_lpi_cpus));
+	DEFINE(PM_INFO_MX7_VAL_OFF, offsetof(struct imx7_pm_info, val));
+	DEFINE(PM_INFO_MX7_FLAG0_OFF, offsetof(struct imx7_pm_info, flag0));
+	DEFINE(PM_INFO_MX7_FLAG1_OFF, offsetof(struct imx7_pm_info, flag1));
+	DEFINE(PM_INFO_MX7_DDRC_REG_NUM_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_num));
+	DEFINE(PM_INFO_MX7_DDRC_REG_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_val));
+	DEFINE(PM_INFO_MX7_DDRC_PHY_REG_NUM_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_phy_num));
+	DEFINE(PM_INFO_MX7_DDRC_PHY_REG_OFF,
+	       offsetof(struct imx7_pm_info, ddrc_phy_val));
+
+	DEFINE(PM_INFO_PBASE_OFF,
+	       offsetof(struct imx6_pm_info, pa_base));
+	DEFINE(PM_INFO_TEE_RESUME_OFF,
+	       offsetof(struct imx6_pm_info, tee_resume));
+	DEFINE(PM_INFO_DDR_TYPE_OFF,
+	       offsetof(struct imx6_pm_info, ddr_type));
+	DEFINE(PM_INFO_INFO_SIZE_OFF,
+	       offsetof(struct imx6_pm_info, pm_info_size));
+	DEFINE(PM_INFO_MMDC0_P_OFF,
+	       offsetof(struct imx6_pm_info, mmdc0_pa_base));
+	DEFINE(PM_INFO_MMDC0_V_OFF,
+	       offsetof(struct imx6_pm_info, mmdc0_va_base));
+	DEFINE(PM_INFO_MMDC1_P_OFF,
+	       offsetof(struct imx6_pm_info, mmdc1_pa_base));
+	DEFINE(PM_INFO_MMDC1_V_OFF,
+	       offsetof(struct imx6_pm_info, mmdc1_va_base));
+	DEFINE(PM_INFO_SRC_P_OFF,
+	       offsetof(struct imx6_pm_info, src_pa_base));
+	DEFINE(PM_INFO_SRC_V_OFF,
+	       offsetof(struct imx6_pm_info, src_va_base));
+	DEFINE(PM_INFO_IOMUXC_P_OFF,
+	       offsetof(struct imx6_pm_info, iomuxc_pa_base));
+	DEFINE(PM_INFO_IOMUXC_V_OFF,
+	       offsetof(struct imx6_pm_info, iomuxc_va_base));
+	DEFINE(PM_INFO_CCM_P_OFF,
+	       offsetof(struct imx6_pm_info, ccm_pa_base));
+	DEFINE(PM_INFO_CCM_V_OFF,
+	       offsetof(struct imx6_pm_info, ccm_va_base));
+	DEFINE(PM_INFO_GPC_P_OFF,
+	       offsetof(struct imx6_pm_info, gpc_pa_base));
+	DEFINE(PM_INFO_GPC_V_OFF,
+	       offsetof(struct imx6_pm_info, gpc_va_base));
+	DEFINE(PM_INFO_PL310_V_OFF,
+	       offsetof(struct imx6_pm_info, pl310_va_base));
+	DEFINE(PM_INFO_ANATOP_P_OFF,
+	       offsetof(struct imx6_pm_info, anatop_pa_base));
+	DEFINE(PM_INFO_ANATOP_V_OFF,
+	       offsetof(struct imx6_pm_info, anatop_va_base));
+	DEFINE(PM_INFO_SEMA4_P_OFF,
+	       offsetof(struct imx6_pm_info, sema4_pa_base));
+	DEFINE(PM_INFO_SEMA4_V_OFF,
+	       offsetof(struct imx6_pm_info, sema4_va_base));
+	DEFINE(PM_INFO_TTBR0_OFF,
+	       offsetof(struct imx6_pm_info, ttbr0));
+	DEFINE(PM_INFO_TTBR1_OFF,
+	       offsetof(struct imx6_pm_info, ttbr1));
+	DEFINE(PM_INFO_SAVED_DIAGNOSTIC_OFF,
+	       offsetof(struct imx6_pm_info, diagnostic));
+	DEFINE(PM_INFO_IDLE_STATE,
+	       offsetof(struct imx6_pm_info, idle_state));
+	DEFINE(PM_INFO_MMDC_IO_NUM_OFF,
+	       offsetof(struct imx6_pm_info, mmdc_io_num));
+	DEFINE(PM_INFO_MMDC_IO_VAL_OFF,
+	       offsetof(struct imx6_pm_info, mmdc_io_val));
+/* Below offsets depend on the MX6_MAX_MMDC_IO_NUM(36) definition */
+	DEFINE(PM_INFO_MMDC_NUM_OFF,
+	       offsetof(struct imx6_pm_info, mmdc_num));
+	DEFINE(PM_INFO_MMDC_VAL_OFF,
+	       offsetof(struct imx6_pm_info, mmdc_val));
+
+	DEFINE(PM_INFO_MX7ULP_M4_RESERVE0_OFF,
+	       offsetof(struct imx7ulp_pm_info, m4_reserve0));
+	DEFINE(PM_INFO_MX7ULP_M4_RESERVE1_OFF,
+	       offsetof(struct imx7ulp_pm_info, m4_reserve1));
+	DEFINE(PM_INFO_MX7ULP_M4_RESERVE2_OFF,
+	       offsetof(struct imx7ulp_pm_info, m4_reserve2));
+	DEFINE(PM_INFO_MX7ULP_PBASE_OFF,
+	       offsetof(struct imx7ulp_pm_info, pbase));
+	DEFINE(PM_INFO_MX7ULP_RESUME_ADDR_OFF,
+	       offsetof(struct imx7ulp_pm_info, resume_addr));
+	DEFINE(PM_INFO_MX7ULP_SIZE_OFF,
+	       offsetof(struct imx7ulp_pm_info, pm_info_size));
+	DEFINE(PM_INFO_MX7ULP_SIM_VBASE_OFF,
+	       offsetof(struct imx7ulp_pm_info, sim_base));
+	DEFINE(PM_INFO_MX7ULP_SCG1_VBASE_OFF,
+	       offsetof(struct imx7ulp_pm_info, scg1_base));
+	DEFINE(PM_INFO_MX7ULP_MMDC_VBASE_OFF,
+	       offsetof(struct imx7ulp_pm_info, mmdc_base));
+	DEFINE(PM_INFO_MX7ULP_MMDC_IO_VBASE_OFF,
+	       offsetof(struct imx7ulp_pm_info, mmdc_io_base));
+	DEFINE(PM_INFO_MX7ULP_SMC1_VBASE_OFF,
+	       offsetof(struct imx7ulp_pm_info, smc1_base));
+	DEFINE(PM_INFO_MX7ULP_SCG1_VAL_OFF,
+	       offsetof(struct imx7ulp_pm_info, scg1));
+	DEFINE(PM_INFO_MX7ULP_TTBR0_V_OFF,
+	       offsetof(struct imx7ulp_pm_info, ttbr0));
+	DEFINE(PM_INFO_MX7ULP_TTBR1_V_OFF,
+	       offsetof(struct imx7ulp_pm_info, ttbr1));
+	DEFINE(PM_INFO_MX7ULP_GPIO_REG_OFF,
+	       offsetof(struct imx7ulp_pm_info, gpio));
+	DEFINE(PM_INFO_MX7ULP_IOMUX_NUM_OFF,
+	       offsetof(struct imx7ulp_pm_info, iomux_num));
+	DEFINE(PM_INFO_MX7ULP_IOMUX_VAL_OFF,
+	       offsetof(struct imx7ulp_pm_info, iomux_val));
+	DEFINE(PM_INFO_MX7ULP_SELECT_INPUT_NUM_OFF,
+	       offsetof(struct imx7ulp_pm_info, select_input_num));
+	DEFINE(PM_INFO_MX7ULP_SELECT_INPUT_VAL_OFF,
+	       offsetof(struct imx7ulp_pm_info, select_input_val));
+	DEFINE(PM_INFO_MX7ULP_MMDC_IO_NUM_OFF,
+	       offsetof(struct imx7ulp_pm_info, mmdc_io_num));
+	DEFINE(PM_INFO_MX7ULP_MMDC_IO_VAL_OFF,
+	       offsetof(struct imx7ulp_pm_info, mmdc_io_val));
+/* Below offsets depend on the MX7ULP_MAX_MMDC_IO_NUM(36) definition */
+	DEFINE(PM_INFO_MX7ULP_MMDC_NUM_OFF,
+	       offsetof(struct imx7ulp_pm_info, mmdc_num));
+	DEFINE(PM_INFO_MX7ULP_MMDC_VAL_OFF,
+	       offsetof(struct imx7ulp_pm_info, mmdc_val));
+}
diff --git a/core/drivers/pm/imx/mmdc.c b/core/drivers/pm/imx/mmdc.c
new file mode 100644
index 000000000..98dd45820
--- /dev/null
+++ b/core/drivers/pm/imx/mmdc.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2021 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <arm.h>
+#include <io.h>
+#include <imx.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+
+#include "imx_pm.h"
+
+/* i.MX6 */
+#define MMDC_MDMISC		0x18
+#define MDMISC_DDR_TYPE_MASK	GENMASK_32(4, 3)
+#define MDMISC_DDR_TYPE_SHIFT	0x3
+
+/* i.MX7 */
+#define DDRC_MSTR		0x0
+#define MSTR_DDR3		BIT(0)
+#define MSTR_LPDDR2		BIT(2)
+#define MSTR_LPDDR3		BIT(3)
+
+int imx_get_ddr_type(void)
+{
+	uint32_t val = 0;
+	uint32_t off = 0;
+	bool is_mx7 = soc_is_imx7ds();
+	vaddr_t mmdc_base = 0;
+
+	if (is_mx7)
+		off = DDRC_MSTR;
+	else
+		off = MMDC_MDMISC;
+
+	mmdc_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC,
+				    off + sizeof(uint32_t));
+	val = io_read32(mmdc_base + off);
+
+	if (is_mx7) {
+		if (val & MSTR_DDR3)
+			return IMX_DDR_TYPE_DDR3;
+		else if (val & MSTR_LPDDR2)
+			return IMX_DDR_TYPE_LPDDR2;
+		else if (val & MSTR_LPDDR3)
+			return IMX_DDR_TYPE_LPDDR3;
+		else
+			return -1;
+	}
+
+	return (val & MDMISC_DDR_TYPE_MASK) >> MDMISC_DDR_TYPE_SHIFT;
+}
diff --git a/core/drivers/pm/imx/pm-imx6.c b/core/drivers/pm/imx/pm-imx6.c
new file mode 100644
index 000000000..b8ab9d3dd
--- /dev/null
+++ b/core/drivers/pm/imx/pm-imx6.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <arm.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <io.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/panic.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <string.h>
+
+const uint32_t imx6q_mmdc_io_offset[] = {
+	0x5ac, 0x5b4, 0x528, 0x520, /* DQM0 ~ DQM3 */
+	0x514, 0x510, 0x5bc, 0x5c4, /* DQM4 ~ DQM7 */
+	0x56c, 0x578, 0x588, 0x594, /* CAS, RAS, SDCLK_0, SDCLK_1 */
+	0x5a8, 0x5b0, 0x524, 0x51c, /* SDQS0 ~ SDQS3 */
+	0x518, 0x50c, 0x5b8, 0x5c0, /* SDQS4 ~ SDQS7 */
+	0x784, 0x788, 0x794, 0x79c, /* GPR_B0DS ~ GPR_B3DS */
+	0x7a0, 0x7a4, 0x7a8, 0x748, /* GPR_B4DS ~ GPR_B7DS */
+	0x59c, 0x5a0, 0x750, 0x774, /* SODT0, SODT1, MODE_CTL, MODE */
+	0x74c,                      /* GPR_ADDS */
+};
+
+const uint32_t imx6dl_mmdc_io_offset[] = {
+	0x470, 0x474, 0x478, 0x47c, /* DQM0 ~ DQM3 */
+	0x480, 0x484, 0x488, 0x48c, /* DQM4 ~ DQM7 */
+	0x464, 0x490, 0x4ac, 0x4b0, /* CAS, RAS, SDCLK_0, SDCLK_1 */
+	0x4bc, 0x4c0, 0x4c4, 0x4c8, /* DRAM_SDQS0 ~ DRAM_SDQS3 */
+	0x4cc, 0x4d0, 0x4d4, 0x4d8, /* DRAM_SDQS4 ~ DRAM_SDQS7 */
+	0x764, 0x770, 0x778, 0x77c, /* GPR_B0DS ~ GPR_B3DS */
+	0x780, 0x784, 0x78c, 0x748, /* GPR_B4DS ~ GPR_B7DS */
+	0x4b4, 0x4b8, 0x750, 0x760, /* SODT0, SODT1, MODE_CTL, MODE */
+	0x74c,                      /* GPR_ADDS */
+};
+
+const uint32_t imx6ul_mmdc_io_offset[] = {
+	0x244, 0x248, 0x24c, 0x250, /* DQM0, DQM1, RAS, CAS */
+	0x27c, 0x498, 0x4a4, 0x490, /* SDCLK0, GPR_B0DS-B1DS, GPR_ADDS */
+	0x280, 0x284, 0x260, 0x264, /* SDQS0~1, SODT0, SODT1 */
+	0x494, 0x4b0,               /* MODE_CTL, MODE, */
+};
+
+const uint32_t imx6ul_mmdc_offset[] = {
+	0x01c, 0x800, 0x80c, 0x83c,
+	0x848, 0x850, 0x81c, 0x820,
+	0x82c, 0x830, 0x8c0, 0x8b8,
+	0x004, 0x008, 0x00c, 0x010,
+	0x014, 0x018, 0x01c, 0x02c,
+	0x030, 0x040, 0x000, 0x01c,
+	0x020, 0x818, 0x01c,
+};
+
+const uint32_t imx6ul_mmdc_io_lpddr2_offset[] = {
+	0x244, 0x248, 0x24c, 0x250,	/* DQM0, DQM1, RAS, CAS */
+	0x27c, 0x498, 0x4a4, 0x490,	/* SDCLK0, GPR_B0DS-B1DS, GPR_ADDS */
+	0x280, 0x284, 0x260, 0x264,	/* SDQS0~1, SODT0, SODT1 */
+	0x494, 0x4b0, 0x274, 0x278,	/* MODE_CTL, MODE, SDCKE0, SDCKE1 */
+	0x288,				/* DRAM_RESET */
+};
+
+const uint32_t imx6ul_mmdc_lpddr2_offset[] = {
+	0x01c, 0x85c, 0x800, 0x890,
+	0x8b8, 0x81c, 0x820, 0x82c,
+	0x830, 0x83c, 0x848, 0x850,
+	0x8c0, 0x8b8, 0x004, 0x008,
+	0x00c, 0x010, 0x038, 0x014,
+	0x018, 0x01c, 0x02c, 0x030,
+	0x040, 0x000, 0x020, 0x818,
+	0x800, 0x004, 0x01c,
+};
+
+const uint32_t imx6sx_mmdc_io_offset[] = {
+	0x2ec, 0x2f0, 0x2f4, 0x2f8,
+	0x60c, 0x610, 0x61c, 0x620,
+	0x300, 0x2fc, 0x32c, 0x5f4,
+	0x310, 0x314, 0x5f8, 0x608,
+	0x330, 0x334, 0x338, 0x33c,
+};
+
+const uint32_t imx6sx_mmdc_offset[] = {
+	0x800, 0x80c, 0x810, 0x83c,
+	0x840, 0x848, 0x850, 0x81c,
+	0x820, 0x824, 0x828, 0x8b8,
+	0x004, 0x008, 0x00c, 0x010,
+	0x014, 0x018, 0x01c, 0x02c,
+	0x030, 0x040, 0x000, 0x01c,
+	0x020, 0x818, 0x01c,
+};
+
+const uint32_t imx6sx_mmdc_io_lpddr2_offset[] = {
+	0x2ec, 0x2f0, 0x2f4, 0x2f8,
+	0x300, 0x2fc, 0x32c, 0x5f4,
+	0x60c, 0x610, 0x61c, 0x620,
+	0x310, 0x314, 0x5f8, 0x608,
+	0x330, 0x334, 0x338, 0x33c,
+	0x324, 0x328, 0x340,
+};
+
+const uint32_t imx6sx_mmdc_lpddr2_offset[] = {
+	0x01c, 0x85c, 0x800, 0x890,
+	0x8b8, 0x81c, 0x820, 0x824,
+	0x828, 0x82c, 0x830, 0x834,
+	0x838, 0x848, 0x850, 0x8c0,
+	0x83c, 0x840, 0x8b8, 0x00c,
+	0x004, 0x010, 0x014, 0x018,
+	0x02c, 0x030, 0x038, 0x008,
+	0x040, 0x000, 0x020, 0x818,
+	0x800, 0x004, 0x01c,
+};
+
+const uint32_t imx6sl_mmdc_io_offset[] = {
+	0x30c, 0x310, 0x314, 0x318, /* DQM0 ~ DQM3 */
+	0x5c4, 0x5cc, 0x5d4, 0x5d8, /* GPR_B0DS ~ GPR_B3DS */
+	0x300, 0x31c, 0x338, 0x5ac, /* CAS, RAS, SDCLK_0, GPR_ADDS */
+	0x33c, 0x340, 0x5b0, 0x5c0, /* SODT0, SODT1, MODE_CTL, MODE */
+	0x330, 0x334, 0x320,        /* SDCKE0, SDCKE1, RESET */
+};
+
+const uint32_t imx6sll_mmdc_io_offset[] = {
+	0x294, 0x298, 0x29c, 0x2a0, /* DQM0 ~ DQM3 */
+	0x544, 0x54c, 0x554, 0x558, /* GPR_B0DS ~ GPR_B3DS */
+	0x530, 0x540, 0x2ac, 0x52c, /* MODE_CTL, MODE, SDCLK_0, GPR_ADDDS */
+	0x2a4, 0x2a8,               /* SDCKE0, SDCKE1*/
+};
+
+const uint32_t imx6sll_mmdc_lpddr3_offset[] = {
+	0x01c, 0x85c, 0x800, 0x890,
+	0x8b8, 0x81c, 0x820, 0x82c,
+	0x830, 0x83c, 0x848, 0x850,
+	0x8c0, 0x8b8, 0x004, 0x008,
+	0x00c, 0x010, 0x038, 0x014,
+	0x018, 0x01c, 0x02c, 0x030,
+	0x040, 0x000, 0x020, 0x818,
+	0x800, 0x004, 0x01c,
+};
+
+static struct imx6_pm_data imx6q_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6q_mmdc_io_offset),
+	.mmdc_io_offset = imx6q_mmdc_io_offset,
+	.mmdc_num = 0,
+	.mmdc_offset = NULL,
+};
+
+static struct imx6_pm_data imx6dl_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6dl_mmdc_io_offset),
+	.mmdc_io_offset = imx6dl_mmdc_io_offset,
+	.mmdc_num = 0,
+	.mmdc_offset = NULL,
+};
+
+struct imx6_pm_data imx6ul_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6ul_mmdc_io_offset),
+	.mmdc_io_offset = imx6ul_mmdc_io_offset,
+	.mmdc_num = ARRAY_SIZE(imx6ul_mmdc_offset),
+	.mmdc_offset = imx6ul_mmdc_offset,
+};
+
+static struct imx6_pm_data imx6ul_lpddr2_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6ul_mmdc_io_lpddr2_offset),
+	.mmdc_io_offset = imx6ul_mmdc_io_lpddr2_offset,
+	.mmdc_num = ARRAY_SIZE(imx6ul_mmdc_lpddr2_offset),
+	.mmdc_offset = imx6ul_mmdc_lpddr2_offset,
+};
+
+struct imx6_pm_data imx6sx_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6sx_mmdc_io_offset),
+	.mmdc_io_offset = imx6sx_mmdc_io_offset,
+	.mmdc_num = ARRAY_SIZE(imx6sx_mmdc_offset),
+	.mmdc_offset = imx6sx_mmdc_offset,
+};
+
+static struct imx6_pm_data imx6sx_lpddr2_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6sx_mmdc_io_lpddr2_offset),
+	.mmdc_io_offset = imx6sx_mmdc_io_lpddr2_offset,
+	.mmdc_num = ARRAY_SIZE(imx6sx_mmdc_lpddr2_offset),
+	.mmdc_offset = imx6sx_mmdc_lpddr2_offset,
+};
+
+struct imx6_pm_data imx6sl_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6sl_mmdc_io_offset),
+	.mmdc_io_offset = imx6sl_mmdc_io_offset,
+	.mmdc_num = 0,
+	.mmdc_offset = NULL,
+};
+
+struct imx6_pm_data imx6sll_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx6sll_mmdc_io_offset),
+	.mmdc_io_offset = imx6sll_mmdc_io_offset,
+	.mmdc_num = ARRAY_SIZE(imx6sll_mmdc_lpddr3_offset),
+	.mmdc_offset = imx6sll_mmdc_lpddr3_offset,
+};
+
+/**
+ * @brief   PM OCRAM free area start address
+ */
+vaddr_t pm_ocram_free_area;
+
+int imx6_suspend_init(void)
+{
+	uint32_t i;
+	const uint32_t *mmdc_offset_array;
+	const uint32_t *mmdc_io_offset_array;
+	struct imx6_pm_data *pm_data;
+
+	uint32_t suspend_ocram_base =
+		core_mmu_get_va(imx_get_ocram_tz_start_addr() +
+				SUSPEND_OCRAM_OFFSET,
+				MEM_AREA_TEE_COHERENT, SUSPEND_OCRAM_SIZE);
+	struct imx6_pm_info *p = (struct imx6_pm_info *)suspend_ocram_base;
+
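+	/*
+	 * The pm_info structure is placed at the start of the suspend
+	 * OCRAM area; the relocated imx6_suspend() code follows it.
+	 */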
+	uint32_t func_size = get_imx6_suspend_size();
+
+	suspend_func = (int (*)(uint32_t))(suspend_ocram_base + sizeof(*p));
+
+	suspend_arg.pa_addr = virt_to_phys((void *)(vaddr_t)suspend_func);
+	suspend_arg.pm_info = p;
+
+	p->pa_base =  imx_get_ocram_tz_start_addr() + SUSPEND_OCRAM_OFFSET;
+	p->tee_resume = (paddr_t)virt_to_phys((void *)(vaddr_t)v7_cpu_resume);
+	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC, CCM_SIZE);
+	p->ccm_pa_base = CCM_BASE;
+	p->mmdc0_va_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC,
+					   MMDC_P0_SIZE);
+	p->mmdc0_pa_base = MMDC_P0_BASE;
+#if !(defined(CFG_MX6UL) || defined(CFG_MX6ULL) || defined(CFG_MX6SL) || \
+      defined(CFG_MX6SLL))
+	p->mmdc1_va_base = core_mmu_get_va(MMDC_P1_BASE, MEM_AREA_IO_SEC,
+					   MMDC_P1_SIZE);
+	p->mmdc1_pa_base = MMDC_P1_BASE;
+#endif
+	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC, SRC_SIZE);
+	p->src_pa_base = SRC_BASE;
+	p->iomuxc_va_base = core_mmu_get_va(IOMUXC_BASE, MEM_AREA_IO_SEC,
+					    IOMUXC_SIZE);
+	p->iomuxc_pa_base = IOMUXC_BASE;
+	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC, GPC_SIZE);
+	p->gpc_pa_base = GPC_BASE;
+#ifdef CFG_PL310
+	p->pl310_va_base = core_mmu_get_va(PL310_BASE, MEM_AREA_IO_SEC,
+					   PL310_SIZE);
+#endif
+	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC,
+					    ANATOP_SIZE);
+	p->anatop_pa_base = ANATOP_BASE;
+
+	if (soc_is_imx6sx()) {
+		p->sema4_pa_base = SEMA4_BASE;
+		p->sema4_va_base = core_mmu_get_va(SEMA4_BASE, MEM_AREA_IO_SEC,
+						   SEMA4_SIZE);
+	}
+
+	p->ddr_type = imx_get_ddr_type();
+	switch (p->ddr_type) {
+	case IMX_DDR_TYPE_DDR3:
+		if (soc_is_imx6dq() || soc_is_imx6dqp())
+			pm_data = &imx6q_pm_data;
+		else if (soc_is_imx6sdl())
+			pm_data = &imx6dl_pm_data;
+		else if (soc_is_imx6ul() || soc_is_imx6ull())
+			pm_data = &imx6ul_pm_data;
+		else if (soc_is_imx6sx())
+			pm_data = &imx6sx_pm_data;
+		else
+			panic("Not supported\n");
+
+		break;
+	case IMX_DDR_TYPE_LPDDR2:
+		if (soc_is_imx6ul() || soc_is_imx6ull())
+			pm_data = &imx6ul_lpddr2_pm_data;
+		else if (soc_is_imx6sl())
+			pm_data = &imx6sl_pm_data;
+		else if (soc_is_imx6sx())
+			pm_data = &imx6sx_lpddr2_pm_data;
+		else
+			panic("No lpddr2 support\n");
+		break;
+
+	case IMX_MMDC_DDR_TYPE_LPDDR3:
+		pm_data = &imx6sll_pm_data;
+		break;
+	default:
+		panic("Not supported ddr type\n");
+		break;
+	}
+
+	p->mmdc_io_num = pm_data->mmdc_io_num;
+	p->mmdc_num = pm_data->mmdc_num;
+	mmdc_io_offset_array = pm_data->mmdc_io_offset;
+	mmdc_offset_array = pm_data->mmdc_offset;
+
+	for (i = 0; i < p->mmdc_io_num; i++) {
+		p->mmdc_io_val[i][0] = mmdc_io_offset_array[i];
+		/* Not sure why read32 will complain readelf warning */
+		p->mmdc_io_val[i][1] = io_read32(p->iomuxc_va_base +
+					mmdc_io_offset_array[i]);
+		p->mmdc_io_val[i][2] = 0;
+	}
+
+	/* There is no DRAM RESET pin on i.MX6SLL */
+	if (soc_is_imx6sll()) {
+		p->mmdc_io_val[p->mmdc_io_num - 2][2] = 0x1000;
+		p->mmdc_io_val[p->mmdc_io_num - 1][2] = 0x1000;
+	} else {
+		if (p->ddr_type == IMX_DDR_TYPE_LPDDR2) {
+			/* LPDDR2: CKE0/1 and RESET need special settings */
+			p->mmdc_io_val[p->mmdc_io_num - 3][2] = 0x1000;
+			p->mmdc_io_val[p->mmdc_io_num - 2][2] = 0x1000;
+			p->mmdc_io_val[p->mmdc_io_num - 1][2] = 0x1000;
+		}
+	}
+
+	for (i = 0; i < p->mmdc_num; i++) {
+		p->mmdc_val[i][0] = mmdc_offset_array[i];
+		p->mmdc_val[i][1] = io_read32(p->mmdc0_va_base +
+					mmdc_offset_array[i]);
+	}
+
+	if (soc_is_imx6sll() && p->ddr_type == IMX_MMDC_DDR_TYPE_LPDDR3) {
+		p->mmdc_val[0][1] = 0x8000;
+		p->mmdc_val[2][1] = 0xa1390003;
+		p->mmdc_val[3][1] = 0x400000;
+		p->mmdc_val[4][1] = 0x800;
+		p->mmdc_val[13][1] = 0x800;
+		p->mmdc_val[14][1] = 0x20052;
+		p->mmdc_val[20][1] = 0x201718;
+		p->mmdc_val[21][1] = 0x8000;
+		p->mmdc_val[28][1] = 0xa1310003;
+	}
+
+	if ((soc_is_imx6sx() || soc_is_imx6ul() || soc_is_imx6ull()) &&
+	    p->ddr_type != IMX_DDR_TYPE_LPDDR2) {
+		p->mmdc_val[20][1] = (p->mmdc_val[20][1] &
+				      0xffff0000) | 0x0202;
+		p->mmdc_val[23][1] = 0x8033;
+	}
+
+	if (soc_is_imx6sx() && p->ddr_type == IMX_DDR_TYPE_LPDDR2) {
+		p->mmdc_val[0][1] = 0x8000;
+		p->mmdc_val[2][1] = 0xa1390003;
+		p->mmdc_val[3][1] = 0x380000;
+		p->mmdc_val[4][1] = 0x800;
+		p->mmdc_val[18][1] = 0x800;
+		p->mmdc_val[20][1] = 0x20024;
+		p->mmdc_val[23][1] = 0x1748;
+		p->mmdc_val[32][1] = 0xa1310003;
+	}
+
+	if ((soc_is_imx6ul() || soc_is_imx6ull()) &&
+	    p->ddr_type == IMX_DDR_TYPE_LPDDR2) {
+		p->mmdc_val[0][1] = 0x8000;
+		p->mmdc_val[2][1] = 0xa1390003;
+		p->mmdc_val[3][1] = 0x470000;
+		p->mmdc_val[4][1] = 0x800;
+		p->mmdc_val[13][1] = 0x800;
+		p->mmdc_val[14][1] = 0x20012;
+		p->mmdc_val[20][1] = 0x1748;
+		p->mmdc_val[21][1] = 0x8000;
+		p->mmdc_val[28][1] = 0xa1310003;
+	}
+
+	memcpy((void *)(vaddr_t)suspend_func,
+			(void *)(vaddr_t)imx6_suspend, func_size);
+
+	pm_ocram_free_area = (vaddr_t)suspend_func + func_size;
+
+	dcache_clean_range((void *)suspend_ocram_base,
+			(pm_ocram_free_area - suspend_ocram_base + 4));
+	/*
+	 * Note: the IRAM uses an IOSEC mapping here; if it is ever changed
+	 * to a MEM mapping, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	return 0;
+}
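
The mmdc_io_val[][3] triples filled above are what the relocated low-level code consumes later: word 0 is the IOMUXC pad offset, word 1 the value sampled here (written back on resume) and word 2 the low-power value applied before entering DSM. A rough, illustrative C model of that consumption, assuming only the struct imx6_pm_info fields used above (the real work is done by the set_mmdc_io_lpm/resume_io assembly in psci-suspend-imx6.S, possibly with the MMU off):

#include <imx_pm.h>
#include <io.h>
#include <stdint.h>

/* Illustrative only: mirrors what the OCRAM assembly does with the table */
static void mmdc_io_enter_lpm(struct imx6_pm_info *p, vaddr_t iomuxc_base)
{
	uint32_t i = 0;

	/* Apply the low-power pad values (word 2) */
	for (i = 0; i < p->mmdc_io_num; i++)
		io_write32(iomuxc_base + p->mmdc_io_val[i][0],
			   p->mmdc_io_val[i][2]);
}

static void mmdc_io_resume(struct imx6_pm_info *p, vaddr_t iomuxc_base)
{
	uint32_t i = 0;

	/* Write back the pad values sampled at init time (word 1) */
	for (i = 0; i < p->mmdc_io_num; i++)
		io_write32(iomuxc_base + p->mmdc_io_val[i][0],
			   p->mmdc_io_val[i][1]);
}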
diff --git a/core/drivers/pm/imx/pm-imx7.c b/core/drivers/pm/imx/pm-imx7.c
new file mode 100644
index 000000000..8307861a0
--- /dev/null
+++ b/core/drivers/pm/imx/pm-imx7.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2019 NXP
+ */
+
+#include <arm.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <io.h>
+#include <kernel/panic.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <string.h>
+
+#define READ_DATA_FROM_HARDWARE		0
+
+static uint32_t imx7d_ddrc_lpddr3_setting[][2] = {
+	{ 0x0, READ_DATA_FROM_HARDWARE },
+	{ 0x1a0, READ_DATA_FROM_HARDWARE },
+	{ 0x1a4, READ_DATA_FROM_HARDWARE },
+	{ 0x1a8, READ_DATA_FROM_HARDWARE },
+	{ 0x64, READ_DATA_FROM_HARDWARE },
+	{ 0xd0, READ_DATA_FROM_HARDWARE },
+	{ 0xdc, READ_DATA_FROM_HARDWARE },
+	{ 0xe0, READ_DATA_FROM_HARDWARE },
+	{ 0xe4, READ_DATA_FROM_HARDWARE },
+	{ 0xf4, READ_DATA_FROM_HARDWARE },
+	{ 0x100, READ_DATA_FROM_HARDWARE },
+	{ 0x104, READ_DATA_FROM_HARDWARE },
+	{ 0x108, READ_DATA_FROM_HARDWARE },
+	{ 0x10c, READ_DATA_FROM_HARDWARE },
+	{ 0x110, READ_DATA_FROM_HARDWARE },
+	{ 0x114, READ_DATA_FROM_HARDWARE },
+	{ 0x118, READ_DATA_FROM_HARDWARE },
+	{ 0x120, READ_DATA_FROM_HARDWARE },
+	{ 0x11c, READ_DATA_FROM_HARDWARE },
+	{ 0x180, READ_DATA_FROM_HARDWARE },
+	{ 0x184, READ_DATA_FROM_HARDWARE },
+	{ 0x190, READ_DATA_FROM_HARDWARE },
+	{ 0x194, READ_DATA_FROM_HARDWARE },
+	{ 0x200, READ_DATA_FROM_HARDWARE },
+	{ 0x204, READ_DATA_FROM_HARDWARE },
+	{ 0x210, READ_DATA_FROM_HARDWARE },
+	{ 0x214, READ_DATA_FROM_HARDWARE },
+	{ 0x218, READ_DATA_FROM_HARDWARE },
+	{ 0x240, READ_DATA_FROM_HARDWARE },
+	{ 0x244, READ_DATA_FROM_HARDWARE },
+};
+
+static uint32_t imx7d_ddrc_phy_lpddr3_setting[][2] = {
+	{ 0x0, READ_DATA_FROM_HARDWARE },
+	{ 0x4, READ_DATA_FROM_HARDWARE },
+	{ 0x8, READ_DATA_FROM_HARDWARE },
+	{ 0x10, READ_DATA_FROM_HARDWARE },
+	{ 0xb0, READ_DATA_FROM_HARDWARE },
+	{ 0x1c, READ_DATA_FROM_HARDWARE },
+	{ 0x9c, READ_DATA_FROM_HARDWARE },
+	{ 0x7c, READ_DATA_FROM_HARDWARE },
+	{ 0x80, READ_DATA_FROM_HARDWARE },
+	{ 0x84, READ_DATA_FROM_HARDWARE },
+	{ 0x88, READ_DATA_FROM_HARDWARE },
+	{ 0x6c, READ_DATA_FROM_HARDWARE },
+	{ 0x20, READ_DATA_FROM_HARDWARE },
+	{ 0x30, READ_DATA_FROM_HARDWARE },
+	{ 0x50, 0x01000008 },
+	{ 0x50, 0x00000008 },
+	{ 0xc0, 0x0e407304 },
+	{ 0xc0, 0x0e447304 },
+	{ 0xc0, 0x0e447306 },
+	{ 0xc0, 0x0e4c7304 },
+	{ 0xc0, 0x0e487306 },
+};
+
+static uint32_t imx7d_ddrc_ddr3_setting[][2] = {
+	{ 0x0, READ_DATA_FROM_HARDWARE },
+	{ 0x1a0, READ_DATA_FROM_HARDWARE },
+	{ 0x1a4, READ_DATA_FROM_HARDWARE },
+	{ 0x1a8, READ_DATA_FROM_HARDWARE },
+	{ 0x64, READ_DATA_FROM_HARDWARE },
+	{ 0x490, READ_DATA_FROM_HARDWARE },
+	{ 0xd0, READ_DATA_FROM_HARDWARE },
+	{ 0xd4, READ_DATA_FROM_HARDWARE },
+	{ 0xdc, READ_DATA_FROM_HARDWARE },
+	{ 0xe0, READ_DATA_FROM_HARDWARE },
+	{ 0xe4, READ_DATA_FROM_HARDWARE },
+	{ 0xf4, READ_DATA_FROM_HARDWARE },
+	{ 0x100, READ_DATA_FROM_HARDWARE },
+	{ 0x104, READ_DATA_FROM_HARDWARE },
+	{ 0x108, READ_DATA_FROM_HARDWARE },
+	{ 0x10c, READ_DATA_FROM_HARDWARE },
+	{ 0x110, READ_DATA_FROM_HARDWARE },
+	{ 0x114, READ_DATA_FROM_HARDWARE },
+	{ 0x120, READ_DATA_FROM_HARDWARE },
+	{ 0x180, READ_DATA_FROM_HARDWARE },
+	{ 0x190, READ_DATA_FROM_HARDWARE },
+	{ 0x194, READ_DATA_FROM_HARDWARE },
+	{ 0x200, READ_DATA_FROM_HARDWARE },
+	{ 0x204, READ_DATA_FROM_HARDWARE },
+	{ 0x214, READ_DATA_FROM_HARDWARE },
+	{ 0x218, READ_DATA_FROM_HARDWARE },
+	{ 0x240, READ_DATA_FROM_HARDWARE },
+	{ 0x244, READ_DATA_FROM_HARDWARE },
+};
+
+static uint32_t imx7d_ddrc_phy_ddr3_setting[][2] = {
+	{ 0x0, READ_DATA_FROM_HARDWARE },
+	{ 0x4, READ_DATA_FROM_HARDWARE },
+	{ 0x10, READ_DATA_FROM_HARDWARE },
+	{ 0xb0, READ_DATA_FROM_HARDWARE },
+	{ 0x9c, READ_DATA_FROM_HARDWARE },
+	{ 0x7c, READ_DATA_FROM_HARDWARE },
+	{ 0x80, READ_DATA_FROM_HARDWARE },
+	{ 0x84, READ_DATA_FROM_HARDWARE },
+	{ 0x88, READ_DATA_FROM_HARDWARE },
+	{ 0x6c, READ_DATA_FROM_HARDWARE },
+	{ 0x20, READ_DATA_FROM_HARDWARE },
+	{ 0x30, READ_DATA_FROM_HARDWARE },
+	{ 0x50, 0x01000010 },
+	{ 0x50, 0x00000010 },
+	{ 0xc0, 0x0e407304 },
+	{ 0xc0, 0x0e447304 },
+	{ 0xc0, 0x0e447306 },
+	{ 0xc0, 0x0e447304 },
+	{ 0xc0, 0x0e407306 },
+};
+
+static struct imx7_pm_data imx7d_pm_data_lpddr3 = {
+	.ddrc_num = ARRAY_SIZE(imx7d_ddrc_lpddr3_setting),
+	.ddrc_offset = imx7d_ddrc_lpddr3_setting,
+	.ddrc_phy_num = ARRAY_SIZE(imx7d_ddrc_phy_lpddr3_setting),
+	.ddrc_phy_offset = imx7d_ddrc_phy_lpddr3_setting,
+};
+
+static struct imx7_pm_data imx7d_pm_data_ddr3 = {
+	.ddrc_num = ARRAY_SIZE(imx7d_ddrc_ddr3_setting),
+	.ddrc_offset = imx7d_ddrc_ddr3_setting,
+	.ddrc_phy_num = ARRAY_SIZE(imx7d_ddrc_phy_ddr3_setting),
+	.ddrc_phy_offset = imx7d_ddrc_phy_ddr3_setting,
+};
+
+struct imx7_pm_info *pm_info;
+
+int imx7_suspend_init(void)
+{
+	uint32_t i;
+	uint32_t (*ddrc_offset_array)[2];
+	uint32_t (*ddrc_phy_offset_array)[2];
+	uint32_t suspend_ocram_base = core_mmu_get_va(
+						imx_get_ocram_tz_start_addr() +
+						SUSPEND_OCRAM_OFFSET,
+						MEM_AREA_TEE_COHERENT,
+						SUSPEND_OCRAM_SIZE);
+	struct imx7_pm_info *p = (struct imx7_pm_info *)suspend_ocram_base;
+	struct imx7_pm_data *pm_data;
+
+	pm_info = p;
+
+	dcache_op_level1(DCACHE_OP_CLEAN_INV);
+
+	DMSG("%x %zu", suspend_ocram_base, sizeof(*p));
+
+	p->pa_base = imx_get_ocram_tz_start_addr() + SUSPEND_OCRAM_OFFSET;
+	p->tee_resume = virt_to_phys((void *)(vaddr_t)ca7_cpu_resume);
+	p->pm_info_size = sizeof(*p);
+	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC, 1);
+	p->ccm_pa_base = CCM_BASE;
+	p->ddrc_va_base = core_mmu_get_va(DDRC_BASE, MEM_AREA_IO_SEC, 1);
+	p->ddrc_pa_base = DDRC_BASE;
+	p->ddrc_phy_va_base = core_mmu_get_va(DDRC_PHY_BASE, MEM_AREA_IO_SEC,
+					      1);
+	p->ddrc_phy_pa_base = DDRC_PHY_BASE;
+	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC, 1);
+	p->src_pa_base = SRC_BASE;
+	p->iomuxc_gpr_va_base = core_mmu_get_va(IOMUXC_GPR_BASE,
+						MEM_AREA_IO_SEC, 1);
+	p->iomuxc_gpr_pa_base = IOMUXC_GPR_BASE;
+	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC, 1);
+	p->gpc_pa_base = GPC_BASE;
+	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC, 1);
+	p->anatop_pa_base = ANATOP_BASE;
+	p->snvs_va_base = core_mmu_get_va(SNVS_BASE, MEM_AREA_IO_SEC, 1);
+	p->snvs_pa_base = SNVS_BASE;
+	p->lpsr_va_base = core_mmu_get_va(LPSR_BASE, MEM_AREA_IO_SEC, 1);
+	p->lpsr_pa_base = LPSR_BASE;
+	p->gic_va_base = core_mmu_get_va(GIC_BASE, MEM_AREA_IO_SEC, 1);
+	p->gic_pa_base = GIC_BASE;
+
+	/* TODO: LPSR is disabled for now */
+	io_write32(p->lpsr_va_base, 0);
+
+	p->ddr_type = imx_get_ddr_type();
+	switch (p->ddr_type) {
+	case IMX_DDR_TYPE_DDR3:
+		pm_data = &imx7d_pm_data_ddr3;
+		break;
+	case IMX_DDR_TYPE_LPDDR3:
+		pm_data = &imx7d_pm_data_lpddr3;
+		break;
+	default:
+		panic("Unsupported DDR type");
+		break;
+	}
+
+	p->ddrc_num = pm_data->ddrc_num;
+	p->ddrc_phy_num = pm_data->ddrc_phy_num;
+	ddrc_offset_array = pm_data->ddrc_offset;
+	ddrc_phy_offset_array = pm_data->ddrc_phy_offset;
+
+	for (i = 0; i < p->ddrc_num; i++) {
+		p->ddrc_val[i][0] = ddrc_offset_array[i][0];
+		if (ddrc_offset_array[i][1] == READ_DATA_FROM_HARDWARE)
+			p->ddrc_val[i][1] = io_read32(p->ddrc_va_base +
+						      ddrc_offset_array[i][0]);
+		else
+			p->ddrc_val[i][1] = ddrc_offset_array[i][1];
+
+		if (p->ddrc_val[i][0] == 0xd0)
+			p->ddrc_val[i][1] |= 0xc0000000;
+	}
+
+	/* initialize DDRC PHY settings */
+	for (i = 0; i < p->ddrc_phy_num; i++) {
+		p->ddrc_phy_val[i][0] = ddrc_phy_offset_array[i][0];
+		if (ddrc_phy_offset_array[i][1] == READ_DATA_FROM_HARDWARE)
+			p->ddrc_phy_val[i][1] =
+				io_read32(p->ddrc_phy_va_base +
+					  ddrc_phy_offset_array[i][0]);
+		else
+			p->ddrc_phy_val[i][1] = ddrc_phy_offset_array[i][1];
+	}
+
+	memcpy((void *)(suspend_ocram_base + sizeof(*p)),
+	       (void *)(vaddr_t)imx7_suspend, SUSPEND_OCRAM_SIZE - sizeof(*p));
+
+	dcache_clean_range((void *)suspend_ocram_base, SUSPEND_OCRAM_SIZE);
+
+	/*
+	 * Note: the IRAM uses an IOSEC mapping here; if it is ever changed
+	 * to a MEM mapping, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	return 0;
+}
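
The memcpy above fixes the OCRAM layout this driver relies on: the imx7_pm_info header sits at the start of the suspend OCRAM area and the relocated imx7_suspend code starts immediately after it. A minimal sketch of how that layout is consumed at suspend time; it mirrors the call made later in imx7_suspend.c and assumes nothing beyond the names used in this patch:

#include <imx_pm.h>
#include <sm/pm.h>

static int enter_relocated_suspend(struct imx7_pm_info *p,
				   vaddr_t suspend_ocram_base)
{
	/* The relocated code starts right after the pm_info header */
	int (*entry)(uint32_t) =
		(int (*)(uint32_t))(suspend_ocram_base + sizeof(*p));

	/* sm_pm_cpu_suspend() saves the CPU context, then calls entry(arg) */
	return sm_pm_cpu_suspend((uint32_t)p, entry);
}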
diff --git a/core/drivers/pm/imx/pm-imx7ulp.c b/core/drivers/pm/imx/pm-imx7ulp.c
new file mode 100644
index 000000000..fa6945f40
--- /dev/null
+++ b/core/drivers/pm/imx/pm-imx7ulp.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <arm.h>
+#include <imx_pm.h>
+#include <io.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <string.h>
+
+static uint32_t imx7ulp_mmdc_io_lpddr3_offset[] = {
+	0x128, 0xf8, 0xd8, 0x108,
+	0x104, 0x124, 0x80, 0x84,
+	0x88, 0x8c, 0x120, 0x10c,
+	0x110, 0x114, 0x118, 0x90,
+	0x94, 0x98, 0x9c, 0xe0,
+	0xe4,
+};
+
+static uint32_t imx7ulp_mmdc_lpddr3_offset[] = {
+	0x01c, 0x800, 0x85c, 0x890,
+	0x848, 0x850, 0x81c, 0x820,
+	0x824, 0x828, 0x82c, 0x830,
+	0x834, 0x838, 0x8c0, 0x8b8,
+	0x004, 0x00c, 0x010, 0x038,
+	0x014, 0x018, 0x02c, 0x030,
+	0x040, 0x000, 0x01c, 0x01c,
+	0x01c, 0x01c, 0x01c, 0x01c,
+	0x01c, 0x01c, 0x01c, 0x01c,
+	0x01c, 0x01c, 0x83c, 0x020,
+	0x800, 0x004, 0x404, 0x01c,
+};
+
+static const uint32_t imx7ulp_lpddr3_script[] = {
+	0x00008000, 0xA1390003, 0x0D3900A0, 0x00400000,
+	0x40404040, 0x40404040, 0x33333333, 0x33333333,
+	0x33333333, 0x33333333, 0xf3333333, 0xf3333333,
+	0xf3333333, 0xf3333333, 0x24922492, 0x00000800,
+	0x00020052, 0x292C42F3, 0x00100A22, 0x00120556,
+	0x00C700DB, 0x00211718, 0x0F9F26D2, 0x009F0E10,
+	0x0000003F, 0xC3190000, 0x00008050, 0x00008058,
+	0x003F8030, 0x003F8038, 0xFF0A8030, 0xFF0A8038,
+	0x04028030, 0x04028038, 0x83018030, 0x83018038,
+	0x01038030, 0x01038038, 0x20000000, 0x00001800,
+	0xA1310000, 0x00020052, 0x00011006, 0x00000000,
+};
+
+static struct imx7ulp_pm_data imx7ulp_lpddr3_pm_data = {
+	.mmdc_io_num = ARRAY_SIZE(imx7ulp_mmdc_io_lpddr3_offset),
+	.mmdc_io_offset = imx7ulp_mmdc_io_lpddr3_offset,
+	.mmdc_num = ARRAY_SIZE(imx7ulp_mmdc_lpddr3_offset),
+	.mmdc_offset = imx7ulp_mmdc_lpddr3_offset,
+};
+
+struct imx7ulp_pm_info *pm_info;
+
+int imx7ulp_suspend_init(void)
+{
+	uint32_t i;
+	uint32_t suspend_ocram_base = (uint32_t)core_mmu_get_va(
+			(paddr_t)LP_OCRAM_START +
+			SUSPEND_OCRAM_OFFSET, MEM_AREA_TEE_COHERENT,
+			SUSPEND_OCRAM_SIZE);
+	struct imx7ulp_pm_info *p =
+			(struct imx7ulp_pm_info *)suspend_ocram_base;
+	struct imx7ulp_pm_data *pm_data;
+	uint32_t *mmdc_io_offset_array, *mmdc_offset_array;
+
+	pm_info = p;
+
+	dcache_op_level1(DCACHE_OP_CLEAN_INV);
+
+	DMSG("%x %zu", suspend_ocram_base, sizeof(*p));
+
+	p->pbase = LP_OCRAM_START + SUSPEND_OCRAM_OFFSET;
+	p->resume_addr = (paddr_t)virt_to_phys(
+				(void *)(vaddr_t)imx7ulp_cpu_resume);
+	p->pm_info_size = sizeof(*p);
+	p->scg1_base = core_mmu_get_va(SCG1_BASE, MEM_AREA_IO_SEC, SCG1_SIZE);
+	p->smc1_base = core_mmu_get_va(SMC1_BASE, MEM_AREA_IO_SEC, SMC1_SIZE);
+	p->mmdc_base = core_mmu_get_va(MMDC_BASE, MEM_AREA_IO_SEC, MMDC_SIZE);
+	p->mmdc_io_base = core_mmu_get_va(MMDC_IO_BASE, MEM_AREA_IO_SEC,
+					  MMDC_IO_SIZE);
+	p->sim_base = core_mmu_get_va(SIM_BASE, MEM_AREA_IO_SEC, SIM_SIZE);
+
+	pm_data = &imx7ulp_lpddr3_pm_data;
+	p->mmdc_io_num = pm_data->mmdc_io_num;
+	mmdc_io_offset_array = pm_data->mmdc_io_offset;
+	p->mmdc_num = pm_data->mmdc_num;
+	mmdc_offset_array = pm_data->mmdc_offset;
+
+	for (i = 0; i < p->mmdc_io_num; i++) {
+		p->mmdc_io_val[i][0] = mmdc_io_offset_array[i];
+		p->mmdc_io_val[i][1] = io_read32(p->mmdc_io_base +
+					      mmdc_io_offset_array[i]);
+	}
+
+	/* initialize MMDC settings */
+	for (i = 0; i < p->mmdc_num; i++)
+		p->mmdc_val[i][0] = mmdc_offset_array[i];
+
+	for (i = 0; i < p->mmdc_num; i++)
+		p->mmdc_val[i][1] = imx7ulp_lpddr3_script[i];
+
+	memcpy((void *)(suspend_ocram_base + sizeof(*p)),
+			(void *)(vaddr_t)imx7ulp_suspend,
+			SUSPEND_OCRAM_SIZE - sizeof(*p));
+
+	dcache_clean_range((void *)suspend_ocram_base, SUSPEND_OCRAM_SIZE);
+	/*
+	 * Note: the IRAM uses an IOSEC mapping here; if it is ever changed
+	 * to a MEM mapping, the cache must be flushed.
+	 */
+	icache_inv_all();
+
+	DMSG("%s %x\n", __func__, (uint32_t)(p->resume_addr));
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/pm-imx8.c b/core/drivers/pm/imx/pm-imx8.c
new file mode 100644
index 000000000..35010dbf6
--- /dev/null
+++ b/core/drivers/pm/imx/pm-imx8.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2019, 2021 NXP
+ *
+ */
+
+#include <kernel/pm.h>
+#include <kernel/thread.h>
+#include <imx_pm.h>
+#include <trace.h>
+#include <util.h>
+
+/**
+ * @brief   Handler for cpu resume
+ *
+ * @param[in] a0   Max power level powered down
+ * @param[in] a1   Unused
+ *
+ * @retval TEE_SUCCESS         Success
+ * @retval TEE_ERROR_GENERIC   Generic error
+ */
+unsigned long thread_cpu_resume_handler(unsigned long a0,
+						unsigned long a1 __unused)
+{
+	TEE_Result retstatus = TEE_SUCCESS;
+
+	/*
+	 * a0 is the highest power level that was powered down:
+	 *	PM_CORE_LEVEL (0) for Core level
+	 *	PM_CLUSTER_LEVEL (1) for Cluster level
+	 *	PM_SYSTEM_LEVEL (2) for System level
+	 */
+	if (a0 == PM_SYSTEM_LEVEL)
+		retstatus = pm_change_state(PM_OP_RESUME,
+					PM_HINT_CONTEXT_STATE);
+
+	/*
+	 * Value returned to the ATF: if it is not 0, the system panics.
+	 */
+	return retstatus;
+}
+
+/**
+ * @brief   Handler for cpu suspend
+ *
+ * @param[in] a0   Max power level to power down
+ * @param[in] a1   Unused
+ *
+ * @retval TEE_SUCCESS         Success
+ * @retval TEE_ERROR_GENERIC   Generic error
+ */
+unsigned long thread_cpu_suspend_handler(unsigned long a0,
+						unsigned long a1 __unused)
+{
+	TEE_Result retstatus = TEE_SUCCESS;
+
+	/*
+	 * a0 is the highest power level about to be powered down:
+	 *	PM_CORE_LEVEL (0) for Core level
+	 *	PM_CLUSTER_LEVEL (1) for Cluster level
+	 *	PM_SYSTEM_LEVEL (2) for System level
+	 */
+	if (a0 == PM_SYSTEM_LEVEL)
+		retstatus = pm_change_state(PM_OP_SUSPEND,
+					PM_HINT_CONTEXT_STATE);
+
+	/*
+	 * Value returned to the ATF: if it is not 0, the system panics.
+	 */
+	return retstatus;
+}
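
Both handlers forward the event to the drivers only when the whole system was powered down, via pm_change_state() and PM_HINT_CONTEXT_STATE. Drivers see these transitions through the generic OP-TEE PM callback registration; the sketch below is illustrative and assumes the upstream register_pm_driver_cb()/pm_callback helpers rather than anything added by this patch:

#include <initcall.h>
#include <kernel/pm.h>

static TEE_Result my_dev_pm(enum pm_op op, uint32_t pm_hint __unused,
			    const struct pm_callback_handle *hdl __unused)
{
	if (op == PM_OP_SUSPEND) {
		/* Save device state before the system-level power down */
	} else {
		/* PM_OP_RESUME: restore device state */
	}

	return TEE_SUCCESS;
}

static TEE_Result my_dev_init(void)
{
	/* Registered callbacks are invoked by pm_change_state() */
	register_pm_driver_cb(my_dev_pm, NULL, "my-dev");

	return TEE_SUCCESS;
}
driver_init(my_dev_init);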
diff --git a/core/drivers/pm/imx/psci.c b/core/drivers/pm/imx/psci.c
index a0e21ddba..963dea59e 100644
--- a/core/drivers/pm/imx/psci.c
+++ b/core/drivers/pm/imx/psci.c
@@ -1,27 +1,34 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
  * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2023 NXP
+ * Copyright 2018-2020 NXP
  *
  * Peng Fan <peng.fan@nxp.com>
  */
 
+#include <console.h>
 #include <drivers/imx_snvs.h>
+#include <drivers/imx_uart.h>
 #include <drivers/imx_wdog.h>
 #include <io.h>
 #include <imx.h>
+#include <imx-regs.h>
 #include <kernel/boot.h>
 #include <kernel/misc.h>
+#include <kernel/pm.h>
 #include <mm/core_mmu.h>
 #include <mm/core_memprot.h>
+#include <platform_config.h>
 #include <stdint.h>
+#include <sm/optee_smc.h>
 #include <sm/psci.h>
 #include <sm/std_smc.h>
 
+#include "imx_pm.h"
 #include "local.h"
 
-#define IOMUXC_GPR5_OFFSET 0x14
-#define ARM_WFI_STAT_MASK(n) BIT(n)
+#define IOMUXC_GPR5_OFFSET	0x14
+#define ARM_WFI_STAT_MASK(n)	BIT(n)
 
 int psci_features(uint32_t psci_fid)
 {
@@ -29,6 +36,7 @@ int psci_features(uint32_t psci_fid)
 	case ARM_SMCCC_VERSION:
 	case PSCI_PSCI_FEATURES:
 	case PSCI_VERSION:
+	case PSCI_CPU_SUSPEND:
 	case PSCI_CPU_OFF:
 #ifdef CFG_BOOT_SECONDARY_REQUEST
 	case PSCI_CPU_ON:
@@ -124,6 +132,232 @@ void __noreturn psci_system_off(void)
 	while (1)
 		;
 }
+__weak int imx6ul_lowpower_idle(uint32_t power_state __unused,
+				uintptr_t entry __unused,
+				uint32_t context_id __unused,
+				struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+__weak int imx6sx_lowpower_idle(uint32_t power_state __unused,
+				uintptr_t entry __unused,
+				uint32_t context_id __unused,
+				struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+__weak int imx6sl_lowpower_idle(uint32_t power_state __unused,
+				uintptr_t entry __unused,
+				uint32_t context_id __unused,
+				struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+__weak int imx6sll_lowpower_idle(uint32_t power_state __unused,
+				uintptr_t entry __unused,
+				uint32_t context_id __unused,
+				struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+__weak int imx6_cpu_suspend(uint32_t power_state __unused,
+			    uintptr_t entry __unused,
+			    uint32_t context_id __unused,
+			    struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+__weak int imx7d_lowpower_idle(uint32_t power_state __unused,
+			uintptr_t entry __unused,
+			uint32_t context_id __unused,
+			struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+__weak int imx7_cpu_suspend(uint32_t power_state __unused,
+			    uintptr_t entry __unused,
+			    uint32_t context_id __unused,
+			    struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+__weak int imx7ulp_cpu_suspend(uint32_t power_state __unused,
+			    uintptr_t entry __unused,
+			    uint32_t context_id __unused,
+			    struct sm_nsec_ctx *nsec __unused)
+{
+	return 0;
+}
+
+int psci_cpu_suspend(uint32_t power_state,
+		     uintptr_t entry, uint32_t context_id __unused,
+		     struct sm_nsec_ctx *nsec)
+{
+	uint32_t id, type;
+	int ret = PSCI_RET_INVALID_PARAMETERS;
+	TEE_Result retstatus;
+
+	id = power_state & PSCI_POWER_STATE_ID_MASK;
+	type = (power_state & PSCI_POWER_STATE_TYPE_MASK) >>
+		PSCI_POWER_STATE_TYPE_SHIFT;
+
+	if ((type != PSCI_POWER_STATE_TYPE_POWER_DOWN) &&
+	    (type != PSCI_POWER_STATE_TYPE_STANDBY)) {
+		DMSG("Not supported %x\n", type);
+		return ret;
+	}
+
+	/*
+	 * ID 0 means suspend
+	 * ID 1 means low power idle
+	 * TODO: follow PSCI StateID sample encoding.
+	 */
+	DMSG("ID = %d\n", id);
+
+	/*
+	 * For i.MX6SL, cpuidle needs the state of the LDO 2P5 and the
+	 * busfreq mode. This information is packed into power_state, so
+	 * the LDO 2P5 and busfreq bits must be masked out of 'id' before
+	 * checking it.
+	 */
+	if (soc_is_imx6sl())
+		id &= ~(0x6);
+
+	if (id == 1) {
+		retstatus = pm_change_state(PM_OP_SUSPEND, PM_HINT_CLOCK_STATE);
+		if (retstatus != TEE_SUCCESS) {
+			EMSG("Drivers idle preparation ret 0x%" PRIx32,
+			     retstatus);
+			pm_change_state(PM_OP_RESUME, PM_HINT_CLOCK_STATE);
+			return PSCI_RET_DENIED;
+		}
+
+		if (soc_is_imx6ul() || soc_is_imx6ull())
+			ret = imx6ul_lowpower_idle(power_state, entry,
+						   context_id, nsec);
+		else if (soc_is_imx7ds())
+			ret = imx7d_lowpower_idle(power_state, entry,
+						  context_id, nsec);
+		else if (soc_is_imx6sx())
+			ret = imx6sx_lowpower_idle(power_state, entry,
+						   context_id, nsec);
+		else if (soc_is_imx6sl())
+			ret = imx6sl_lowpower_idle(power_state, entry,
+						   context_id, nsec);
+		else if (soc_is_imx6sll())
+			ret = imx6sll_lowpower_idle(power_state, entry,
+						    context_id, nsec);
+		else {
+			EMSG("Not supported now\n");
+			ret = PSCI_RET_INVALID_PARAMETERS;
+		}
+		pm_change_state(PM_OP_RESUME, PM_HINT_CLOCK_STATE);
+	} else if (id == 0) {
+		retstatus =
+			pm_change_state(PM_OP_SUSPEND, PM_HINT_CONTEXT_STATE);
+		if (retstatus != TEE_SUCCESS) {
+			EMSG("Drivers suspend preparation ret 0x%" PRIx32 "",
+			     retstatus);
+			pm_change_state(PM_OP_RESUME, PM_HINT_CONTEXT_STATE);
+			return PSCI_RET_DENIED;
+		}
+
+		if (soc_is_imx7ds())
+			ret = imx7_cpu_suspend(power_state, entry, context_id,
+					       nsec);
+		else if (soc_is_imx6())
+			ret = imx6_cpu_suspend(power_state, entry, context_id,
+					       nsec);
+		else if (soc_is_imx7ulp())
+			ret = imx7ulp_cpu_suspend(power_state, entry,
+						  context_id, nsec);
+		else {
+			EMSG("Not supported now\n");
+			ret = PSCI_RET_INVALID_PARAMETERS;
+		}
+		pm_change_state(PM_OP_RESUME, PM_HINT_CONTEXT_STATE);
+	} else {
+		DMSG("ID %d not supported\n", id);
+	}
+
+	return ret;
+}
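
The id/type split above decodes the original PSCI power_state format using the sm/psci.h masks; a caller composes the value the same way. A small illustrative helper, not part of the patch:

/* Illustrative: build a power_state value psci_cpu_suspend() can decode */
static uint32_t make_power_state(uint32_t type, uint32_t id)
{
	return ((type << PSCI_POWER_STATE_TYPE_SHIFT) &
		PSCI_POWER_STATE_TYPE_MASK) |
	       (id & PSCI_POWER_STATE_ID_MASK);
}

For example, make_power_state(PSCI_POWER_STATE_TYPE_POWER_DOWN, 0) requests a full suspend, while id 1 requests low power idle.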
+
+/* Weak stubs: the SoC-specific implementations are not always built */
+__weak int imx6ul_cpuidle_init(void)
+{
+	return 0;
+}
+
+__weak int imx6sx_cpuidle_init(void)
+{
+	return 0;
+}
+
+__weak int imx6sll_cpuidle_init(void)
+{
+	return 0;
+}
+
+__weak int imx6_suspend_init(void)
+{
+	return 0;
+}
+
+__weak int imx7d_cpuidle_init(void)
+{
+	return 0;
+}
+
+__weak int imx7_suspend_init(void)
+{
+	return 0;
+}
+
+__weak int imx7ulp_suspend_init(void)
+{
+	return 0;
+}
+
+static TEE_Result init_psci(void)
+{
+	int err = 0;
+
+	/*
+	 * Initialize the power management data.
+	 * It must be done after the OCRAM initialization.
+	 */
+#ifdef CFG_MX7ULP
+	err = imx7ulp_suspend_init();
+#else
+	if (!err) {
+		if (soc_is_imx6())
+			err = imx6_suspend_init();
+		else if (soc_is_imx7ds())
+			err = imx7_suspend_init();
+	}
+
+	if (soc_is_imx6ul() || soc_is_imx6ull()) {
+		err = imx6ul_cpuidle_init();
+	} else if (soc_is_imx6sx()) {
+		err = imx6sx_cpuidle_init();
+	} else if (soc_is_imx6sll()) {
+		err = imx6sll_cpuidle_init();
+	} else if (soc_is_imx7ds()) {
+		err = imx7d_cpuidle_init();
+	}
+
+#endif
+
+	return (!err) ? TEE_SUCCESS : TEE_ERROR_GENERIC;
+}
 
 void __noreturn psci_system_reset(void)
 {
@@ -136,3 +370,5 @@ int __noreturn psci_system_reset2(uint32_t reset_type __unused,
 	/* force WDOG reset */
 	imx_wdog_restart(false);
 }
+
+service_init(init_psci);
diff --git a/core/drivers/pm/imx/sub.mk b/core/drivers/pm/imx/sub.mk
index c3860cbc4..4be6a516e 100644
--- a/core/drivers/pm/imx/sub.mk
+++ b/core/drivers/pm/imx/sub.mk
@@ -1,3 +1,14 @@
-srcs-y += psci.c
-srcs-$(CFG_MX7) += gpcv2.c
-srcs-$(CFG_MX6)$(CFG_MX7) += src.c
+incdirs-y += .
+
+subdirs-$(CFG_BUSFREQ) += busfreq
+subdirs-y += cpuidle
+subdirs-y += suspend
+
+srcs-$(CFG_PSCI_ARM32) += psci.c imx_ocram.c
+srcs-$(CFG_MX7) += pm-imx7.c gpcv2.c
+srcs-$(CFG_MX6) += pm-imx6.c
+srcs-$(CFG_MX6)$(CFG_MX7) += src.c mmdc.c
+srcs-$(CFG_MX7ULP) += pm-imx7ulp.c
+srcs-$(CFG_MX8M)$(CFG_MX8ULP) += pm-imx8.c
+
+asm-defines-y += imx_pm_asm_defines.c
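
The asm-defines-y entry is what generates <generated/imx_pm_asm_defines.h>, so the PM_INFO_*/PM_ASM_ARG_* byte offsets used by the suspend assembly always track the C structures. A hedged sketch of what imx_pm_asm_defines.c could contain, using OP-TEE's gen-asm-defines mechanism; the field list below is illustrative, not lifted from the patch:

#include <gen-asm-defines.h>
#include <imx_pm.h>
#include <stddef.h>

DEFINES
{
	/* Offsets consumed by psci-suspend-imx6.S */
	DEFINE(PM_ASM_ARG_PA_ADDR_OFF,
	       offsetof(struct imx_pm_asm_arg, pa_addr));
	DEFINE(PM_ASM_ARG_PM_INFO_OFF,
	       offsetof(struct imx_pm_asm_arg, pm_info));
	DEFINE(PM_INFO_PBASE_OFF, offsetof(struct imx6_pm_info, pa_base));
}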
diff --git a/core/drivers/pm/imx/suspend/imx6_suspend.c b/core/drivers/pm/imx/suspend/imx6_suspend.c
new file mode 100644
index 000000000..a05c3da7a
--- /dev/null
+++ b/core/drivers/pm/imx/suspend/imx6_suspend.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2018, 2020 NXP
+ *
+ */
+#include <arm.h>
+#include <console.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <kernel/boot.h>
+#include <imx_pl310.h>
+#include <kernel/tz_ssvce_pl310.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <sm/sm.h>
+#include <sm/pm.h>
+
+int (*suspend_func)(uint32_t);
+struct imx_pm_asm_arg suspend_arg;
+
+int imx6_cpu_suspend(uint32_t power_state __unused, uintptr_t entry,
+		     uint32_t context_id __unused, struct sm_nsec_ctx *nsec)
+{
+	int ret;
+
+	DMSG("=== Entering Suspend ===\n");
+
+	/* Store non-sec ctx regs */
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	ret = sm_pm_cpu_suspend((uint32_t)&suspend_arg, suspend_func);
+
+	/*
+	 * sm_pm_cpu_suspend() may return without having actually suspended
+	 * (e.g. a wakeup source was already pending); check its return
+	 * value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+		return 0;
+	}
+
+	/* Restore register of different mode in secure world */
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	/*
+	 * Call the Wakeup Late function to restore some
+	 * HW configuration (e.g. TZASC)
+	 */
+	plat_cpu_wakeup_late();
+
+	/* Back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
+	boot_primary_init_intc();
+
+	/*
+	 * There is no driver suspend/resume framework in OP-TEE, so the L2
+	 * cache is re-enabled here. This differs slightly from the boot
+	 * time initialization: at this point the MMU is up and the L1
+	 * cache is already enabled.
+	 */
+#ifdef CFG_PL310
+	if (pl310_enabled(pl310_base()))
+		return 0;
+
+	arm_cl2_config(pl310_base());
+	arm_cl2_invbyway(pl310_base());
+	arm_cl2_enable(pl310_base());
+	arm_cl2_invbyway(pl310_base());
+#endif
+
+	DMSG("=== Back from Suspended ===\n");
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/suspend/imx7_suspend.c b/core/drivers/pm/imx/suspend/imx7_suspend.c
new file mode 100644
index 000000000..c4596bdfa
--- /dev/null
+++ b/core/drivers/pm/imx/suspend/imx7_suspend.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2020 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <arm.h>
+#include <console.h>
+#include <drivers/imx_uart.h>
+#include <io.h>
+#include <imx.h>
+#include <imx_pm.h>
+#include <kernel/boot.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <sm/sm.h>
+#include <sm/pm.h>
+#include <stdint.h>
+
+int imx7_cpu_suspend(uint32_t power_state __unused, uintptr_t entry,
+		     uint32_t context_id __unused, struct sm_nsec_ctx *nsec)
+{
+	uint32_t suspend_ocram_base = core_mmu_get_va(
+						imx_get_ocram_tz_start_addr() +
+						SUSPEND_OCRAM_OFFSET,
+						MEM_AREA_TEE_COHERENT,
+						SUSPEND_OCRAM_SIZE);
+	struct imx7_pm_info *p = (struct imx7_pm_info *)suspend_ocram_base;
+	int ret;
+
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(suspend_ocram_base + sizeof(*p)));
+	/*
+	 * sm_pm_cpu_suspend() may return without having actually suspended
+	 * (e.g. a wakeup source was already pending); check its return
+	 * value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+		return 0;
+	}
+
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	/*
+	 * Call the Wakeup Late function to restore some
+	 * HW configuration (e.g. TZASC)
+	 */
+	plat_cpu_wakeup_late();
+
+	/* Set entry for back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
+	boot_primary_init_intc();
+
+	DMSG("=== Back from Suspended ===\n");
+
+	return 0;
+}
diff --git a/core/drivers/pm/imx/suspend/imx7ulp_suspend.c b/core/drivers/pm/imx/suspend/imx7ulp_suspend.c
new file mode 100644
index 000000000..8d0632554
--- /dev/null
+++ b/core/drivers/pm/imx/suspend/imx7ulp_suspend.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2017-2018, 2021 NXP
+ *
+ */
+#include <arm.h>
+#include <console.h>
+#include <io.h>
+#include <imx_pm.h>
+#include <imx.h>
+#include <kernel/boot.h>
+#include <kernel/misc.h>
+#include <kernel/panic.h>
+#include <kernel/cache_helpers.h>
+#include <mm/core_mmu.h>
+#include <mm/core_memprot.h>
+#include <platform_config.h>
+#include <stdint.h>
+#include <sm/optee_smc.h>
+#include <sm/psci.h>
+#include <sm/sm.h>
+#include <sm/pm.h>
+
+/*
+ * TODO:
+ * Suspend/resume and low power idle share this code.
+ * Do we need to provide separate versions for them?
+ */
+
+static int suspended_init;
+static vaddr_t gpio_base[4];
+static vaddr_t scg1_base;
+static vaddr_t pcc2_base;
+static vaddr_t pcc3_base;
+static vaddr_t iomuxc1_base;
+static vaddr_t tpm5_base;
+static vaddr_t smc1_base;
+static vaddr_t pmc0_base;
+static vaddr_t pmc1_base;
+static vaddr_t pcr_base[4];
+
+#define GPIO_PDOR	0x0
+#define GPIO_PDDR	0x14
+
+#define IOMUX_START		0x0
+#define SELECT_INPUT_START	0x200
+
+#define DGO_GPR3	0x60
+#define DGO_GPR4	0x64
+
+#define TPM_SC		0x10
+#define TPM_MOD		0x18
+#define TPM_C0SC	0x20
+#define TPM_C0V		0x24
+
+#define PMPROT	0x8
+#define PMCTRL	0x10
+#define PMSTAT	0x18
+#define SRS	0x20
+#define RPC	0x24
+#define SSRS	0x28
+#define SRIE	0x2c
+#define SRIF	0x30
+#define CSRE	0x34
+#define MR	0x40
+
+#define PMC_HSRUN		0x4
+#define PMC_RUN			0x8
+#define PMC_VLPR		0xc
+#define PMC_STOP		0x10
+#define PMC_VLPS		0x14
+#define PMC_LLS			0x18
+#define PMC_VLLS		0x1c
+#define PMC_STATUS		0x20
+#define PMC_CTRL		0x24
+#define PMC_SRAMCTRL_0		0x28
+#define PMC_SRAMCTRL_1		0x2c
+#define PMC_SRAMCTRL_2		0x30
+
+#define BM_PMPROT_AHSRUN	SHIFT_U32(1, 7)
+#define BM_PMPROT_AVLP		SHIFT_U32(1, 5)
+#define BM_PMPROT_ALLS		SHIFT_U32(1, 3)
+#define BM_PMPROT_AVLLS		SHIFT_U32(1, 1)
+
+#define BM_PMCTRL_STOPA		SHIFT_U32(1, 24)
+#define BM_PMCTRL_PSTOPO	SHIFT_U32(3, 16)
+#define BM_PMCTRL_RUNM		SHIFT_U32(3, 8)
+#define BM_PMCTRL_STOPM		SHIFT_U32(7, 0)
+
+#define BM_VLPS_RBBEN		SHIFT_U32(1, 28)
+
+#define BM_CTRL_LDOEN		SHIFT_U32(1, 31)
+#define BM_CTRL_LDOOKDIS	SHIFT_U32(1, 30)
+
+#define BM_VLLS_MON1P2HVDHP	SHIFT_U32(1, 5)
+#define BM_VLLS_MON1P2LVDHP	SHIFT_U32(1, 4)
+
+#define BP_PMCTRL_STOPM		0
+#define BP_PMCTRL_PSTOPO	16
+
+static uint32_t tpm5_regs[4];
+static uint32_t pcc2_regs[][2] = {
+	{0x20, 0}, {0x3c, 0}, {0x40, 0}, {0x6c, 0},
+	{0x84, 0}, {0x90, 0}, {0x94, 0}, {0x98, 0},
+	{0x9c, 0}, {0xa4, 0}, {0xa8, 0}, {0xac, 0},
+	{0xb0, 0}, {0xb4, 0}, {0xb8, 0}, {0xc4, 0},
+	{0xcc, 0}, {0xd0, 0}, {0xd4, 0}, {0xd8, 0},
+	{0xdc, 0}, {0xe0, 0}, {0xf4, 0}, {0x10c, 0},
+};
+
+static uint32_t pcc3_regs[][2] = {
+	{0x84, 0}, {0x88, 0}, {0x90, 0}, {0x94, 0},
+	{0x98, 0}, {0x9c, 0}, {0xa0, 0}, {0xa4, 0},
+	{0xa8, 0}, {0xac, 0}, {0xb8, 0}, {0xbc, 0},
+	{0xc0, 0}, {0xc4, 0}, {0x140, 0}, {0x144, 0},
+};
+
+static uint32_t scg1_offset[] = {
+	0x14, 0x30, 0x40, 0x304,
+	0x500, 0x504, 0x508, 0x50c,
+	0x510, 0x514, 0x600, 0x604,
+	0x608, 0x60c, 0x610, 0x614,
+	0x104,
+};
+
+static uint32_t pcr_regs[4][20];
+static unsigned int pcr_nums[] = { 20, 12, 16, 20 };
+
+static void imx7ulp_gpio_save(struct imx7ulp_pm_info *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(gpio_base); i++) {
+		p->gpio[i][0] = io_read32(gpio_base[i] + GPIO_PDOR);
+		p->gpio[i][1] = io_read32(gpio_base[i] + GPIO_PDDR);
+	}
+}
+
+static void imx7ulp_scg1_save(struct imx7ulp_pm_info *p)
+{
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(scg1_offset); i++)
+		p->scg1[i] = io_read32(scg1_base + scg1_offset[i]);
+}
+
+static void imx7ulp_pcc3_save(struct imx7ulp_pm_info *p __unused)
+{
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pcc3_regs); i++)
+		pcc3_regs[i][1] = io_read32(pcc3_base + pcc3_regs[i][0]);
+}
+
+static void imx7ulp_pcc3_restore(struct imx7ulp_pm_info *p __unused)
+{
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pcc3_regs); i++)
+		io_write32(pcc3_base + pcc3_regs[i][0], pcc3_regs[i][1]);
+}
+
+static void imx7ulp_pcc2_save(struct imx7ulp_pm_info *p __unused)
+{
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pcc2_regs); i++)
+		pcc2_regs[i][1] = io_read32(pcc2_base + pcc2_regs[i][0]);
+}
+
+static void imx7ulp_pcc2_restore(struct imx7ulp_pm_info *p __unused)
+{
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pcc2_regs); i++)
+		io_write32(pcc2_base + pcc2_regs[i][0], pcc2_regs[i][1]);
+}
+
+static inline void imx7ulp_iomuxc_save(struct imx7ulp_pm_info *p)
+{
+	uint32_t i;
+
+	p->iomux_num = MX7ULP_MAX_IOMUX_NUM;
+	p->select_input_num = MX7ULP_MAX_SELECT_INPUT_NUM;
+
+	for (i = 0; i < p->iomux_num; i++)
+		p->iomux_val[i] = io_read32(iomuxc1_base + IOMUX_START + i * 0x4);
+	for (i = 0; i < p->select_input_num; i++)
+		p->select_input_val[i] = io_read32(iomuxc1_base +
+						SELECT_INPUT_START + i * 0x4);
+}
+
+static void imx7ulp_tpm_save(struct imx7ulp_pm_info *p __unused)
+{
+	tpm5_regs[0] = io_read32(tpm5_base + TPM_SC);
+	tpm5_regs[1] = io_read32(tpm5_base + TPM_MOD);
+	tpm5_regs[2] = io_read32(tpm5_base + TPM_C0SC);
+	tpm5_regs[3] = io_read32(tpm5_base + TPM_C0V);
+}
+
+static void imx7ulp_tpm_restore(struct imx7ulp_pm_info *p __unused)
+{
+	io_write32(tpm5_base + TPM_SC, tpm5_regs[0]);
+	io_write32(tpm5_base + TPM_MOD, tpm5_regs[1]);
+	io_write32(tpm5_base + TPM_C0SC, tpm5_regs[2]);
+	io_write32(tpm5_base + TPM_C0V, tpm5_regs[3]);
+}
+
+static void imx7ulp_pcr_save(void)
+{
+	unsigned int j = 0;
+	unsigned int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(pcr_nums); i++) {
+		for (j = 0; j < pcr_nums[i]; j++)
+			pcr_regs[i][j] = io_read32(pcr_base[i] + j * 0x4);
+	}
+}
+
+static void imx7ulp_pcr_restore(void)
+{
+	unsigned int j = 0;
+	unsigned int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(pcr_nums); i++) {
+		for (j = 0; j < pcr_nums[i]; j++)
+			io_write32(pcr_base[i] + j * 0x4, pcr_regs[i][j]);
+	}
+}
+
+static void imx7ulp_set_dgo(struct imx7ulp_pm_info *p, uint32_t val)
+{
+	io_write32(p->sim_base + DGO_GPR3, val);
+	io_write32(p->sim_base + DGO_GPR4, val);
+}
+
+static int imx7ulp_set_lpm(enum imx7ulp_sys_pwr_mode mode)
+{
+	uint32_t val1 = BM_PMPROT_AHSRUN | BM_PMPROT_AVLP | BM_PMPROT_AVLLS;
+	uint32_t val2 = io_read32(smc1_base + PMCTRL);
+	uint32_t val3 = io_read32(pmc0_base + PMC_CTRL);
+
+	val2 &= ~(BM_PMCTRL_RUNM |
+		BM_PMCTRL_STOPM | BM_PMCTRL_PSTOPO);
+	val3 |= BM_CTRL_LDOOKDIS;
+
+	switch (mode) {
+	case RUN:
+		/* system/bus clock enabled */
+		val2 |= 0x3 << BP_PMCTRL_PSTOPO;
+		break;
+	case WAIT:
+		/* system clock disabled, bus clock enabled */
+		val2 |= 0x2 << BP_PMCTRL_PSTOPO;
+		break;
+	case STOP:
+		/* system/bus clock disabled */
+		val2 |= 0x1 << BP_PMCTRL_PSTOPO;
+		break;
+	case VLPS:
+		val2 |= 0x2 << BP_PMCTRL_STOPM;
+		break;
+	case VLLS:
+		val2 |= 0x4 << BP_PMCTRL_STOPM;
+		break;
+	default:
+		panic("Incorrect LPM mode");
+	}
+
+	io_write32(smc1_base + PMPROT, val1);
+	io_write32(smc1_base + PMCTRL, val2);
+	io_write32(pmc0_base + PMC_CTRL, val3);
+
+	return 0;
+}
+
+int imx7ulp_cpu_suspend(uint32_t power_state __unused, uintptr_t entry,
+			uint32_t context_id __unused, struct sm_nsec_ctx *nsec)
+{
+	uint32_t i;
+	int ret = 0;
+	/*
+	 * TODO: move this code to a platform init place. Note that the
+	 * kernel pm-imx6.c needs changing to avoid using the LPRAM.
+	 */
+	uint32_t suspend_ocram_base = core_mmu_get_va(LP_OCRAM_START +
+						      SUSPEND_OCRAM_OFFSET,
+						      MEM_AREA_TEE_COHERENT,
+						      BUSFREQ_MAX_SIZE);
+	struct imx7ulp_pm_info *p =
+			(struct imx7ulp_pm_info *)suspend_ocram_base;
+	uint32_t type;
+
+	type = (power_state & PSCI_POWER_STATE_TYPE_MASK) >>
+		PSCI_POWER_STATE_TYPE_SHIFT;
+
+	if (!suspended_init) {
+		for (i = 0; i < ARRAY_SIZE(gpio_base); i++) {
+			gpio_base[i] = core_mmu_get_va(GPIOC_BASE + i * 0x40,
+						       MEM_AREA_IO_SEC,
+						       0x40);
+		}
+
+		for (i = 0; i < ARRAY_SIZE(pcr_base); i++) {
+			pcr_base[i] = core_mmu_get_va(PCR_BASE + i * 0x10000,
+						      MEM_AREA_IO_SEC,
+						      PCR_SIZE);
+		}
+
+		scg1_base = core_mmu_get_va(SCG1_BASE, MEM_AREA_IO_SEC,
+					    SCG1_SIZE);
+		pcc2_base = core_mmu_get_va(PCC2_BASE, MEM_AREA_IO_SEC,
+					    PCC2_SIZE);
+		pcc3_base = core_mmu_get_va(PCC3_BASE, MEM_AREA_IO_SEC,
+					    PCC3_SIZE);
+		iomuxc1_base = core_mmu_get_va(IOMUXC1_BASE, MEM_AREA_IO_SEC,
+					       IOMUXC1_SIZE);
+		tpm5_base = core_mmu_get_va(TPM5_BASE, MEM_AREA_IO_SEC,
+					    TPM5_SIZE);
+		pmc0_base = core_mmu_get_va(PMC0_BASE, MEM_AREA_IO_SEC,
+					    PMC0_SIZE);
+		pmc1_base = core_mmu_get_va(PMC1_BASE, MEM_AREA_IO_SEC,
+					    PMC1_SIZE);
+		smc1_base = core_mmu_get_va(SMC1_BASE, MEM_AREA_IO_SEC,
+					    SMC1_SIZE);
+		suspended_init = 1;
+	}
+
+	/* Store non-sec ctx regs */
+	sm_save_unbanked_regs(&nsec->ub_regs);
+
+	if (type == PSCI_POWER_STATE_TYPE_POWER_DOWN) {
+		imx7ulp_gpio_save(p);
+		imx7ulp_scg1_save(p);
+		imx7ulp_pcc2_save(p);
+		imx7ulp_pcc3_save(p);
+		imx7ulp_tpm_save(p);
+		imx7ulp_iomuxc_save(p);
+		imx7ulp_pcr_save();
+		imx7ulp_set_lpm(VLLS);
+		ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(suspend_ocram_base + sizeof(*p)));
+		imx7ulp_pcc2_restore(p);
+		imx7ulp_pcc3_restore(p);
+		imx7ulp_set_dgo(p, 0);
+		imx7ulp_tpm_restore(p);
+		imx7ulp_pcr_restore();
+		imx7ulp_set_lpm(RUN);
+	} else if (type == PSCI_POWER_STATE_TYPE_STANDBY) {
+		imx7ulp_set_lpm(VLPS);
+		io_write32(pmc1_base + PMC_VLPS,
+			io_read32(pmc1_base + PMC_VLPS) | BM_VLPS_RBBEN);
+		ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
+				(suspend_ocram_base + sizeof(*p)));
+		io_write32(pmc1_base + PMC_VLPS,
+			io_read32(pmc1_base + PMC_VLPS) & ~BM_VLPS_RBBEN);
+		imx7ulp_set_lpm(RUN);
+	}
+
+	/*
+	 * sm_pm_cpu_suspend() may return without having actually suspended
+	 * (e.g. a wakeup source was already pending); check its return
+	 * value to decide whether the registers must be restored.
+	 */
+	if (ret < 0) {
+		DMSG("=== Not suspended, GPC IRQ Pending ===\n");
+		return 0;
+	}
+
+	/* Restore register of different mode in secure world */
+	sm_restore_unbanked_regs(&nsec->ub_regs);
+
+	/* Back to Linux */
+	nsec->mon_lr = (uint32_t)entry;
+
+	boot_primary_init_intc();
+
+	DMSG("=== Back from Suspended ===\n");
+
+	return 0;
+}
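
The PCC2/PCC3, TPM and PCR blocks above all follow the same save/restore-by-offset-table pattern. A condensed, hypothetical form of that pattern (the names below are illustrative and not part of the patch):

#include <io.h>
#include <stddef.h>
#include <stdint.h>

struct reg_snapshot {
	uint32_t offset;
	uint32_t value;
};

static void regs_save(vaddr_t base, struct reg_snapshot *tbl, size_t num)
{
	size_t i = 0;

	for (i = 0; i < num; i++)
		tbl[i].value = io_read32(base + tbl[i].offset);
}

static void regs_restore(vaddr_t base, const struct reg_snapshot *tbl,
			 size_t num)
{
	size_t i = 0;

	for (i = 0; i < num; i++)
		io_write32(base + tbl[i].offset, tbl[i].value);
}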
diff --git a/core/drivers/pm/imx/suspend/psci-suspend-imx6.S b/core/drivers/pm/imx/suspend/psci-suspend-imx6.S
new file mode 100644
index 000000000..af057c495
--- /dev/null
+++ b/core/drivers/pm/imx/suspend/psci-suspend-imx6.S
@@ -0,0 +1,797 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018, 2020 NXP
+ *
+ */
+
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <platform_config.h>
+#include <imx_pm.h>
+
+	.section .text.psci.suspend
+
+	/* Check if the cpu is cortex-a7 */
+	.macro is_cortex_a7
+
+	/* Read the primary part number from MIDR */
+	mrc     p15, 0, r5, c0, c0, 0
+	ldr     r6, =0xfff0
+	and     r5, r5, r6
+	ldr     r6, =0xc070
+	cmp     r5, r6
+
+	.endm
+
+#define L2X0_CACHE_SYNC                 0x730
+#define PL310_8WAYS_MASK		0x00FF
+#define PL310_16WAYS_UPPERMASK		0xFF00
+#define PL310_LOCKDOWN_SZREG		4
+#define PL310_LOCKDOWN_NBREGS		8
+
+	.macro  sync_l2_cache pl310_base
+
+	/* sync L2 cache to drain L2's buffers to DRAM. */
+#ifdef CFG_PL310
+	mov	r6, #0x0
+	str	r6, [\pl310_base, #L2X0_CACHE_SYNC]
+1:
+	ldr	r6, [\pl310_base, #L2X0_CACHE_SYNC]
+	ands	r6, r6, #0x1
+	bne	1b
+#endif
+
+	.endm
+
+	/* r11 must be MMDC0 base address */
+	/* r12 must be MMDC1 base address */
+	.macro reset_read_fifo
+
+	/* reset read FIFO, RST_RD_FIFO */
+	ldr	r7, =MX6Q_MMDC_MPDGCTRL0
+	ldr	r6, [r11, r7]
+	orr     r6, r6, #(1 << 31)
+	str	r6, [r11, r7]
+2:
+	ldr	r6, [r11, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	2b
+
+	/* reset FIFO a second time */
+	ldr	r6, [r11, r7]
+	orr     r6, r6, #(1 << 31)
+	str	r6, [r11, r7]
+3:
+	ldr	r6, [r11, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	3b
+
+	/* check if second channel mode is enabled */
+	ldr	r7, =MX6Q_MMDC_MISC
+	ldr	r6, [r11, r7]
+	ands	r6, r6, #(1 << 2)
+	beq	6f
+
+	ldr	r7, =MX6Q_MMDC_MPDGCTRL0
+	ldr	r6, [r12, r7]
+	orr     r6, r6, #(1 << 31)
+	str	r6, [r12, r7]
+4:
+	ldr	r6, [r12, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	4b
+
+	ldr	r6, [r12, r7]
+	orr     r6, r6, #(1 << 31)
+	str	r6, [r12, r7]
+5:
+	ldr	r6, [r12, r7]
+	ands	r6, r6, #(1 << 31)
+	bne	5b
+
+6:
+	.endm
+
+	/* r11 must be MMDC base address */
+	/* r12 must be MMDC1 base address */
+	.macro mmdc_out_and_auto_self_refresh
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 21)
+	str	r7, [r11, #MX6Q_MMDC_MAPSR]
+7:
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	bne	7b
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r11, #MX6Q_MMDC_MAPSR]
+
+	/* check if LPDDR2 2-channel mode is enabled */
+	ldr	r7, =MX6Q_MMDC_MISC
+	ldr	r6, [r11, r7]
+	ands	r6, r6, #(1 << 2)
+	beq	9f
+
+	ldr	r7, [r12, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 21)
+	str	r7, [r12, #MX6Q_MMDC_MAPSR]
+8:
+	ldr	r7, [r12, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	bne	8b
+
+	ldr	r7, [r12, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r12, #MX6Q_MMDC_MAPSR]
+9:
+	.endm
+
+	/* r10 must be iomuxc base address */
+	.macro resume_iomuxc_gpr
+
+	add	r10, r10, #0x4000
+	/* IOMUXC GPR DRAM_RESET_BYPASS */
+	ldr	r4, [r10, #0x8]
+	bic	r4, r4, #(0x1 << 27)
+	str	r4, [r10, #0x8]
+	/* IOMUXC GPR DRAM_CKE_BYPASS */
+	ldr	r4, [r10, #0x8]
+	bic	r4, r4, #(0x1 << 31)
+	str	r4, [r10, #0x8]
+
+	.endm
+
+	.macro	resume_io
+
+	/* restore MMDC IO */
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_IOMUXC_P_OFF]
+
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
+10:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x8
+	str	r9, [r10, r8]
+	subs	r6, r6, #0x1
+	bne	10b
+
+	cmp	r5, #0x0
+	/* Here only MMDC0 is set */
+	ldreq	r11, [r0, #PM_INFO_MMDC0_V_OFF]
+	ldrne	r11, [r0, #PM_INFO_MMDC0_P_OFF]
+	ldreq	r12, [r0, #PM_INFO_MMDC1_V_OFF]
+	ldrne	r12, [r0, #PM_INFO_MMDC1_P_OFF]
+
+	reset_read_fifo
+	mmdc_out_and_auto_self_refresh
+
+	.endm
+
+	.macro	resume_mmdc_io
+
+	cmp	r5, #0x0
+	ldreq	r10, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_IOMUXC_P_OFF]
+	ldreq	r11, [r0, #PM_INFO_MMDC0_V_OFF]
+	ldrne	r11, [r0, #PM_INFO_MMDC0_P_OFF]
+	ldreq	r12, [r0, #PM_INFO_MMDC1_V_OFF]
+	ldrne	r12, [r0, #PM_INFO_MMDC1_P_OFF]
+
+	/* resume mmdc iomuxc settings */
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
+11:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x8
+	str	r9, [r10, r8]
+	subs	r6, r6, #0x1
+	bne	11b
+
+	/* check whether we need to restore MMDC */
+	cmp	r5, #0x0
+	beq	12f
+
+	/* check whether the last suspend was done with the M/F mix off */
+	ldr	r9, [r0, #PM_INFO_GPC_P_OFF]
+	ldr	r6, [r9, #0x220]
+	cmp	r6, #0x0
+	bne	13f
+12:
+	resume_iomuxc_gpr
+	reset_read_fifo
+
+	b	17f
+13:
+	/*
+	 * This part of the code is executed only when the MMU is off.
+	 */
+
+	/* restore MMDC settings */
+	ldr	r6, [r0, #PM_INFO_MMDC_NUM_OFF]
+	ldr	r7, =PM_INFO_MMDC_VAL_OFF
+	add	r7, r7, r0
+14:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	str	r9, [r11, r8]
+	subs	r6, r6, #0x1
+	bne	14b
+
+	/* let DDR enter self-refresh */
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 20)
+	str	r7, [r11, #MX6Q_MMDC_MAPSR]
+15:
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 24)
+	beq	15b
+
+	resume_iomuxc_gpr
+	reset_read_fifo
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 20)
+	str	r7, [r11, #MX6Q_MMDC_MAPSR]
+16:
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 24)
+	bne	16b
+
+	/* kick off MMDC */
+	ldr	r4, =0x0
+	str	r4, [r11, #0x1c]
+
+17:
+	mmdc_out_and_auto_self_refresh
+
+	.endm
+
+	.macro store_ttbr1
+
+	/* Store TTBR1 to pm_info->ttbr1 */
+	read_ttbr1	r7
+	str		r7, [r0, #PM_INFO_TTBR1_OFF]
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr	r6
+	bic		r6, r6, #SCTLR_Z
+	write_sctlr	r6
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	/* Store the IRAM table in TTBR1 */
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r6, [r6]
+	dsb
+	isb
+	write_ttbr1 r6
+
+	/* Read TTBCR and set PD0=1 and PD1=0 */
+	/* Warning: unknown behaviour if LPAE is enabled */
+#ifdef CFG_WITH_LPAE
+#error "Case not supported"
+#endif
+	read_ttbcr	r6
+	bic		r6, r6, #0x30
+	orr		r6, r6, #0x10
+	write_ttbcr	r6
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+17:
+	.endm
+
+	.macro restore_ttbr1
+
+	/* Enable L1 data cache. */
+	read_sctlr	r6
+	orr		r6, r6, #SCTLR_C
+	write_sctlr	r6
+	dsb
+	isb
+
+	/* Restore TTBCR */
+	/* Read TTBCR and set PD0=0 and PD1=0 */
+	read_ttbcr	r6
+	bic		r6, r6, #0x30
+	write_ttbcr	r6
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	/* Enable Branch Prediction */
+	read_sctlr	r6
+	orr		r6, r6, #SCTLR_Z
+	write_sctlr	r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* Restore TTBR1, get the origin ttbr1 from pm info */
+	ldr		r6, [r0, #PM_INFO_TTBR1_OFF]
+	write_ttbr1	r6
+	isb
+
+#ifdef CFG_PL310
+	/* Unlock L2 */
+	ldr		r5, [r0, #PM_INFO_PL310_V_OFF]
+	unlock_l2	r5
+#endif
+	.endm
+
+	/* Expect PL310 base address */
+	/* Uses r6, r7, r11 */
+	.macro lock_l2 base
+
+	ldr	r6, [\base, #PL310_AUX_CTRL]
+	tst	r6, #PL310_AUX_16WAY_BIT
+	mov	r6, #PL310_8WAYS_MASK
+	orrne	r6, #PL310_16WAYS_UPPERMASK
+	mov	r7, #PL310_LOCKDOWN_NBREGS
+	add	r11, \base, #PL310_DCACHE_LOCKDOWN_BASE
+19:	/* lock Dcache and Icache */
+	str	r6, [r11], #PL310_LOCKDOWN_SZREG
+	str	r6, [r11], #PL310_LOCKDOWN_SZREG
+	subs	r7, r7, #1
+	bne	19b
+
+	.endm
+
+	/* Expect PL310 base address */
+	/* Uses r6, r7, r11 */
+	.macro unlock_l2 base
+
+	ldr	r6, [\base, #PL310_AUX_CTRL]
+	tst	r6, #PL310_AUX_16WAY_BIT
+	mov	r6, #0x00
+	orrne	r6, #0x0000
+	mov	r7, #PL310_LOCKDOWN_NBREGS
+	add	r11, \base, #PL310_DCACHE_LOCKDOWN_BASE
+20:	/* unlock Dcache and Icache */
+	str	r6, [r11], #PL310_LOCKDOWN_SZREG
+	str	r6, [r11], #PL310_LOCKDOWN_SZREG
+	subs	r7, r7, #1
+	bne	20b
+
+	.endm
+
+	.align 3
+/**
+ * @brief   Prepare the device and switch it into suspend mode.
+ *          The function executes from OCRAM.
+ *          On success, the device is reset.
+ *          The operation can be cancelled; in that case the device is
+ *          not reset and control returns to the caller.
+ *
+ *          The input parameter is a reference to an imx_pm_asm_arg
+ *          structure containing the function arguments (see imx_pm.h).
+ *
+ * @param[in] r0  reference to the imx_pm_asm_arg structure in normal
+ *                memory.
+ */
+FUNC imx6_suspend, :
+	/* Get the function arguments data */
+	ldr	r1, [r0, #PM_ASM_ARG_PA_ADDR_OFF]
+	ldr	r0, [r0, #PM_ASM_ARG_PM_INFO_OFF]
+
+	/*
+	 * Calculate the Physical address of the resume function
+	 * to initialize the SRC register
+	 */
+	ldr	r6, =imx6_suspend
+	ldr	r9, =resume
+	sub	r9, r6
+	add	r9, r1
+
+	ldr	r1, [r0, #PM_INFO_PBASE_OFF]
+
+	/*
+	 * Make sure the TLB contains the addresses we need, as we will
+	 * access them after the MMDC IO pads are floated.
+	 *
+	 * TODO: Can we drop this?
+	 * If the MMU is disabled in the secure world, this may need to
+	 * switch to the physical (P_OFF) addresses instead.
+	 */
+
+	ldr	r11, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r6, [r11, #0x0]
+	ldr	r11, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r6, [r11, #0x0]
+	ldr	r11, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldr	r6, [r11, #0x0]
+
+	/* use r11 to store the IO address */
+	ldr	r11, [r0, #PM_INFO_SRC_V_OFF]
+	/*
+	 * store physical resume addr and pm_info address.
+	 * SRC_GPR2 will be passed as arg to resume func.
+	 */
+	str	r9, [r11, #SRC_GPR1]
+	str	r1, [r11, #SRC_GPR2]
+
+	push	{r0 - r10, lr}
+
+#ifdef CFG_PL310
+	/* Save pm_info to r12 */
+	mov	r12, r0
+
+	/* Lock L2 */
+	ldr		r0, [r12, #PM_INFO_PL310_V_OFF]
+	lock_l2		r0
+
+	/* Sync L2 */
+	ldr		r11, [r12, #PM_INFO_PL310_V_OFF]
+	sync_l2_cache	r11
+
+	/* Clean L2 */
+	ldr	r0, [r12, #PM_INFO_PL310_V_OFF]
+	ldr	r1, =arm_cl2_cleaninvbyway
+	mov	lr, pc
+	bx	r1
+
+	/* Sync L2 */
+	ldr		r11, [r12, #PM_INFO_PL310_V_OFF]
+	sync_l2_cache	r11
+#endif
+	/* Clean L1$ */
+	ldr	r1, =dcache_op_all
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	mov	lr, pc
+	bx	r1
+
+	/* Disable L1$ */
+	read_sctlr	r0
+	bic		r0, r0, #SCTLR_C
+	write_sctlr	r0
+	dsb
+	isb
+
+	/* Clean L1$ */
+	ldr	r1, =dcache_op_all
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	mov	lr, pc
+	bx	r1
+
+	pop	{r0 - r10, lr}
+
+	store_ttbr1
+
+	ldr	r11, [r0, #PM_INFO_MMDC0_V_OFF]
+	ldr	r12, [r0, #PM_INFO_MMDC1_V_OFF]
+	/*
+	 * put DDR explicitly into self-refresh and
+	 * disable automatic power savings.
+	 */
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r11, #MX6Q_MMDC_MAPSR]
+
+	/* make the DDR explicitly enter self-refresh. */
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 21)
+	str	r7, [r11, #MX6Q_MMDC_MAPSR]
+
+poll_dvfs_set:
+	ldr	r7, [r11, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	beq	poll_dvfs_set
+
+	/* check if LPDDR2 2-channel mode is enabled */
+	ldr	r7, =MX6Q_MMDC_MISC
+	ldr	r6, [r11, r7]
+	ands	r6, r6, #(1 << 2)
+	beq	skip_self_refresh_ch1
+
+	ldr	r7, [r12, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r12, #MX6Q_MMDC_MAPSR]
+
+	ldr	r7, [r12, #MX6Q_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 21)
+	str	r7, [r12, #MX6Q_MMDC_MAPSR]
+
+poll_dvfs_set_ch1:
+	ldr	r7, [r12, #MX6Q_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 25)
+	beq	poll_dvfs_set_ch1
+
+skip_self_refresh_ch1:
+	/* use r11 to store the IO address */
+	ldr	r11, [r0, #PM_INFO_IOMUXC_V_OFF]
+	ldr	r6, [r0, #PM_INFO_MMDC_IO_NUM_OFF]
+	ldr	r8, =PM_INFO_MMDC_IO_VAL_OFF
+	add	r8, r8, r0
+set_mmdc_io_lpm:
+	ldr	r7, [r8], #0x8
+	ldr	r9, [r8], #0x4
+	str	r9, [r11, r7]
+	subs	r6, r6, #0x1
+	bne	set_mmdc_io_lpm
+
+	/* check whether it supports Mega/Fast off */
+	ldr	r6, [r0, #PM_INFO_MMDC_NUM_OFF]
+	cmp	r6, #0x0
+	beq	set_mmdc_lpm_done
+
+	/* IOMUXC GPR DRAM_RESET */
+	add	r11, r11, #0x4000
+	ldr	r6, [r11, #0x8]
+	orr	r6, r6, #(0x1 << 28)
+	str	r6, [r11, #0x8]
+
+	/* IOMUXC GPR DRAM_RESET_BYPASS */
+	ldr	r6, [r11, #0x8]
+	orr	r6, r6, #(0x1 << 27)
+	str	r6, [r11, #0x8]
+
+	/* IOMUXC GPR DRAM_CKE_BYPASS */
+	ldr	r6, [r11, #0x8]
+	orr	r6, r6, #(0x1 << 31)
+	str	r6, [r11, #0x8]
+set_mmdc_lpm_done:
+
+	/*
+	 * Mask all GPC interrupts before enabling the RBC counter, to
+	 * avoid the counter starting too early if an interrupt is
+	 * already pending.
+	 */
+	ldr	r11, [r0, #PM_INFO_GPC_V_OFF]
+	ldr	r6, [r11, #MX6Q_GPC_IMR1]
+	ldr	r7, [r11, #MX6Q_GPC_IMR2]
+	ldr	r8, [r11, #MX6Q_GPC_IMR3]
+	ldr	r9, [r11, #MX6Q_GPC_IMR4]
+
+	ldr	r10, =0xffffffff
+	str	r10, [r11, #MX6Q_GPC_IMR1]
+	str	r10, [r11, #MX6Q_GPC_IMR2]
+	str	r10, [r11, #MX6Q_GPC_IMR3]
+	str	r10, [r11, #MX6Q_GPC_IMR4]
+
+	/*
+	 * Enable the RBC bypass counter here to hold off the interrupts.
+	 * RBC counter = 32 (1 ms); the minimum RBC delay should be 400 us
+	 * for the analog LDOs to power down.
+	 */
+	ldr	r11, [r0, #PM_INFO_CCM_V_OFF]
+	ldr	r10, [r11, #MX6Q_CCM_CCR]
+	bic	r10, r10, #(0x3f << 21)
+	orr	r10, r10, #(0x20 << 21)
+	str	r10, [r11, #MX6Q_CCM_CCR]
+
+	/* enable the counter. */
+	ldr	r10, [r11, #MX6Q_CCM_CCR]
+	orr	r10, r10, #(0x1 << 27)
+	str	r10, [r11, #MX6Q_CCM_CCR]
+
+	/* unmask all the GPC interrupts. */
+	ldr	r11, [r0, #PM_INFO_GPC_V_OFF]
+	str	r6, [r11, #MX6Q_GPC_IMR1]
+	str	r7, [r11, #MX6Q_GPC_IMR2]
+	str	r8, [r11, #MX6Q_GPC_IMR3]
+	str	r9, [r11, #MX6Q_GPC_IMR4]
+
+	/*
+	 * Now delay for a short while (3 us). The ARM core runs at 1 GHz
+	 * at this point, so a short loop is enough. This delay is needed
+	 * so that the RBC counter can start counting if an interrupt is
+	 * already pending, or arrives just as the ARM core is about to
+	 * assert DSM_request.
+	 */
+	ldr	r6, =2000
+rbc_loop:
+	subs	r6, r6, #0x1
+	bne	rbc_loop
+
+	/*
+	 * ERR005852 Analog: Transition from Deep Sleep Mode to LDO Bypass
+	 * Mode may cause a slow response of the VDDARM_CAP output.
+	 *
+	 * Software workaround:
+	 * if the internal LDO (VDDARM) is bypassed, switch to analog
+	 * bypass mode (0x1E) prior to entering DSM, then revert to the
+	 * normal bypass mode when exiting DSM.
+	 */
+	ldr	r11, [r0, #PM_INFO_ANATOP_V_OFF]
+	ldr	r10, [r11, #MX6Q_ANATOP_CORE]
+	and	r10, r10, #0x1f
+	cmp	r10, #0x1f
+	bne	ldo_check_done1
+ldo_analog_bypass:
+	ldr	r10, [r11, #MX6Q_ANATOP_CORE]
+	bic	r10, r10, #0x1f
+	orr	r10, r10, #0x1e
+	str	r10, [r11, #MX6Q_ANATOP_CORE]
+ldo_check_done1:
+
+	dsb
+	dmb
+	isb
+
+	/* enter stop mode */
+	wfi
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/*
+	 * Reaching here means there is a pending GPC wakeup interrupt.
+	 */
+	/* Restore the value to 0x1f if LDO bypass mode is used. */
+	ldr	r10, [r11, #MX6Q_ANATOP_CORE]
+	and	r10, r10, #0x1f
+	cmp	r10, #0x1e
+	bne	ldo_check_done2
+ldo_bypass_restore:
+	ldr	r10, [r11, #MX6Q_ANATOP_CORE]
+	orr	r10, r10, #0x1f
+	str	r10, [r11, #MX6Q_ANATOP_CORE]
+ldo_check_done2:
+	mov	r5, #0x0
+
+	/* check whether it supports Mega/Fast off */
+	ldr	r6, [r0, #PM_INFO_MMDC_NUM_OFF]
+	cmp	r6, #0x0
+	beq	only_resume_io
+	resume_mmdc_io
+	b	resume_mmdc_done
+only_resume_io:
+	resume_io
+resume_mmdc_done:
+	/* Clear CORE0's entry and arg */
+	ldr	r11, [r0, #PM_INFO_SRC_V_OFF]
+	mov	r7, #0
+	str	r7, [r11, #SRC_GPR1]
+	str	r7, [r11, #SRC_GPR2]
+
+	restore_ttbr1
+
+	/* return to cpu_suspend */
+	bx	lr
+
+resume:
+	/* monitor mode */
+	mov	r3, #0x16
+	mov	r4, #((1 << 6) | (1 << 7))
+	orr	r3, r3, r4
+	msr	cpsr, r3
+	nop
+	nop
+	nop
+	/*
+	 * Invalidate all instruction caches to PoU
+	 * Invalidate all branch predictors
+	 */
+	write_iciallu
+	write_bpiall
+	dsb
+	isb
+
+	/* r0 was loaded from SRC_GPR2 by the ROM */
+	ldr	r11, [r0, #PM_INFO_ANATOP_P_OFF]
+	ldr	r7, [r11, #MX6Q_ANATOP_CORE]
+	and	r7, r7, #0x1f
+	cmp	r7, #0x1e
+	bne	ldo_check_done3
+	ldr	r7, [r11, #MX6Q_ANATOP_CORE]
+	orr	r7, r7, #0x1f
+	str	r7, [r11, #MX6Q_ANATOP_CORE]
+ldo_check_done3:
+	/* Jump to v7_cpu_resume */
+	ldr	lr, [r0, #PM_INFO_TEE_RESUME_OFF]
+
+	/* Clear CORE0's entry and arg */
+	ldr	r11, [r0, #PM_INFO_SRC_P_OFF]
+	mov	r7, #0
+	str	r7, [r11, #SRC_GPR1]
+	str	r7, [r11, #SRC_GPR2]
+
+	ldr	r3, [r0, #PM_INFO_DDR_TYPE_OFF]
+	mov	r5, #0x1
+
+	/* check whether it supports Mega/Fast off */
+	ldr	r6, [r0, #PM_INFO_MMDC_NUM_OFF]
+	cmp	r6, #0x0
+	beq	dsm_only_resume_io
+	resume_mmdc_io
+	b	dsm_resume_mmdc_done
+dsm_only_resume_io:
+	ldr	r3, [r0, #PM_INFO_DDR_TYPE_OFF]
+	resume_io
+dsm_resume_mmdc_done:
+
+	/* Enable the instruction cache and branch predictors */
+	read_sctlr r6
+	orr	r6, r6, #SCTLR_Z
+	orr	r6, r6, #SCTLR_I
+	write_sctlr r6
+	isb
+
+	bx	lr
+END_FUNC imx6_suspend
+
+/**
+ * @brief   Calculates and returns the suspend function size
+ *
+ * @retval  function size in bytes
+ */
+FUNC get_imx6_suspend_size, :
+	subs	r0, pc, #8
+	ldr		r1, =imx6_suspend
+	sub		r0, r0, r1
+	bx		lr
+END_FUNC get_imx6_suspend_size
+
+/*
+ * Note: VA == PA for TEE_RAM.
+ * This may change in the future.
+ */
+FUNC v7_cpu_resume, :
+/* arm_cl1_d_invbysetway */
+	mov	r0, #0
+	mcr	p15, 2, r0, c0, c0, 0
+	isb
+
+_inv_dcache_off:
+	mov	r0, #0
+_inv_nextWay:
+	mov	r1, #0
+_inv_nextLine:
+	orr	r2, r0, r1
+	mcr	p15, 0, r2, c7, c6, 2
+	add	r1, r1, #1 << LINE_FIELD_OFFSET
+	cmp	r1, #1 << LINE_FIELD_OVERFLOW
+	bne     _inv_nextLine
+	add     r0, r0, #1 << WAY_FIELD_OFFSET
+	cmp     r0, #0
+	bne     _inv_nextWay
+
+	dsb
+	isb
+
+	/*
+	 * No stack; r0-r3 are scratch registers.
+	 * TODO: use a dedicated configuration instead of plat_xxx, since
+	 * plat_xxx may change in the future and cannot be relied on.
+	 * The stack pointer must be handled carefully.
+	 */
+	blx	plat_cpu_reset_early
+
+	b	sm_pm_cpu_resume
+END_FUNC v7_cpu_resume
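
The resume handshake in the code above goes through the SRC general purpose registers: SRC_GPR1 receives the physical address of the relocated 'resume' label (where the boot ROM jumps after DSM) and SRC_GPR2 the physical address of the pm_info block, which the ROM then leaves in r0. An illustrative C restatement of that setup, assuming only the SRC_GPR1/SRC_GPR2 offsets already used by the assembly:

#include <io.h>

static void program_resume_entry(vaddr_t src_base, paddr_t resume_pa,
				 paddr_t pm_info_pa)
{
	io_write32(src_base + SRC_GPR1, (uint32_t)resume_pa); /* ROM entry */
	io_write32(src_base + SRC_GPR2, (uint32_t)pm_info_pa); /* r0 arg */
}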
diff --git a/core/drivers/pm/imx/suspend/psci-suspend-imx7.S b/core/drivers/pm/imx/suspend/psci-suspend-imx7.S
new file mode 100644
index 000000000..431868900
--- /dev/null
+++ b/core/drivers/pm/imx/suspend/psci-suspend-imx7.S
@@ -0,0 +1,689 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2020 NXP
+ */
+
+#include <arm.h>
+#include <arm32_macros.S>
+#include <asm.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <kernel/cache_helpers.h>
+#include <kernel/tz_proc_def.h>
+#include <kernel/tz_ssvce_def.h>
+#include <platform_config.h>
+
+#define MX7_SRC_GPR1	0x74
+#define MX7_SRC_GPR2	0x78
+#define GPC_PGC_C0	0x800
+#define GPC_PGC_FM	0xa00
+#define ANADIG_SNVS_MISC_CTRL	0x380
+#define ANADIG_SNVS_MISC_CTRL_SET 0x384
+#define ANADIG_SNVS_MISC_CTRL_CLR 0x388
+#define ANADIG_DIGPROG	0x800
+#define DDRC_STAT	0x4
+#define DDRC_PWRCTL	0x30
+#define DDRC_PSTAT	0x3fc
+#define DDRC_PCTRL_0	0x490
+#define DDRC_DFIMISC	0x1b0
+#define DDRC_SWCTL	0x320
+#define DDRC_SWSTAT	0x324
+#define DDRPHY_LP_CON0	0x18
+
+#define CCM_SNVS_LPCG	0x250
+#define MX7D_GPC_IMR1	0x30
+#define MX7D_GPC_IMR2	0x34
+#define MX7D_GPC_IMR3	0x38
+#define MX7D_GPC_IMR4	0x3c
+
+/*
+ * The code in this file is copied to coherent on-chip RAM, without any
+ * dependency on code or data in TEE memory (DDR).
+ */
+	.section .text.psci.suspend
+	.align 3
+
+	.macro	disable_l1_dcache
+
+	/*
+	 * flush L1 data cache before clearing SCTLR.C bit.
+	 */
+	push	{r0 - r10, lr}
+	ldr	r1, =dcache_op_all
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+	/* disable d-cache */
+	read_sctlr r7
+	bic	r7, r7, #SCTLR_C
+	write_sctlr r7
+	dsb
+	isb
+
+	push	{r0 - r10, lr}
+	ldr	r1, =dcache_op_all
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+	.endm
+
+	.macro store_ttbr
+
+	/* Store TTBR1 to pm_info->ttbr1 */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_MX7_TTBR1_OFF]
+
+	/* Store TTBR0 to pm_info->ttbr0 */
+	read_ttbr0 r7
+	str	r7, [r0, #PM_INFO_MX7_TTBR0_OFF]
+
+	/* Disable Branch Prediction */
+	read_sctlr r6
+	bic	r6, r6, #SCTLR_Z
+	write_sctlr r6
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r6, [r6]
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR1/0 */
+	write_ttbr1 r6
+	write_ttbr0 r6
+
+	/* Read TTBCR and set PD0=1 */
+	read_ttbcr r6
+	orr	r6, r6, #TTBCR_PD0
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+	.endm
+
+	.macro restore_ttbr
+
+	/* Enable L1 data cache. */
+	read_sctlr r6
+	orr	r6, r6, #SCTLR_C
+	write_sctlr r6
+
+	dsb
+	isb
+
+	/* Restore TTBCR */
+	/* Read TTBCR and set PD0=0 */
+	read_ttbcr r6
+	bic	r6, r6, #TTBCR_PD0
+	write_ttbcr r6
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	/* Enable Branch Prediction */
+	read_sctlr r6
+	orr	r6, r6, #SCTLR_Z
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* Restore TTBR1/0, get the original ttbr1/0 from pm info */
+	ldr	r7, [r0, #PM_INFO_MX7_TTBR1_OFF]
+	write_ttbr1 r7
+	ldr	r7, [r0, #PM_INFO_MX7_TTBR0_OFF]
+	write_ttbr0 r7
+	isb
+
+	.endm
+
+	.macro ddrc_enter_self_refresh
+
+	ldr	r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+
+	/* let DDR out of self-refresh */
+	ldr	r7, =0x0
+	str	r7, [r11, #DDRC_PWRCTL]
+
+	/* wait rw port_busy clear */
+	ldr	r6, =BIT32(16)
+	orr	r6, r6, #0x1
+1:
+	ldr	r7, [r11, #DDRC_PSTAT]
+	ands	r7, r7, r6
+	bne	1b
+
+	/* enter self-refresh bit 5 */
+	ldr	r7, =BIT32(5)
+	str	r7, [r11, #DDRC_PWRCTL]
+
+	/* wait until self-refresh mode entered */
+2:
+	ldr	r7, [r11, #DDRC_STAT]
+	and	r7, r7, #0x3
+	cmp	r7, #0x3
+	bne	2b
+3:
+	ldr	r7, [r11, #DDRC_STAT]
+	ands	r7, r7, #0x20
+	beq	3b
+
+	/* disable dram clk */
+	ldr	r7, [r11, #DDRC_PWRCTL]
+	orr	r7, r7, #BIT32(3)
+	str	r7, [r11, #DDRC_PWRCTL]
+
+	.endm
+
+	.macro ddrc_exit_self_refresh
+
+	cmp	r5, #0x0
+	ldreq	r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+	ldrne	r11, [r0, #PM_INFO_MX7_DDRC_P_OFF]
+
+	/* let DDR out of self-refresh */
+	ldr	r7, =0x0
+	str	r7, [r11, #DDRC_PWRCTL]
+
+	/* wait until DDR is out of self-refresh */
+4:
+	ldr	r7, [r11, #DDRC_STAT]
+	and	r7, r7, #0x3
+	cmp	r7, #0x3
+	beq	4b
+
+	/* enable auto self-refresh */
+	ldr	r7, [r11, #DDRC_PWRCTL]
+	orr	r7, r7, #BIT32(0)
+	str	r7, [r11, #DDRC_PWRCTL]
+
+	.endm
+
+	.macro wait_delay
+5:
+	subs	r6, r6, #0x1
+	bne	5b
+
+	.endm
+
+	.macro ddr_enter_retention
+
+	ldr	r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+
+	/* disable the DDRC AXI port */
+	ldr	r7, =0x0
+	str	r7, [r11, #DDRC_PCTRL_0]
+
+	/* wait rw port_busy clear */
+	ldr	r6, =BIT32(16)
+	orr	r6, r6, #0x1
+6:
+	ldr	r7, [r11, #DDRC_PSTAT]
+	ands	r7, r7, r6
+	bne	6b
+
+	ldr	r11, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+	/* enter self-refresh bit 5 */
+	ldr	r7, =BIT32(5)
+	str	r7, [r11, #DDRC_PWRCTL]
+
+	/* wait until self-refresh mode entered */
+7:
+	ldr	r7, [r11, #DDRC_STAT]
+	and	r7, r7, #0x3
+	cmp	r7, #0x3
+	bne	7b
+8:
+	ldr	r7, [r11, #DDRC_STAT]
+	ands	r7, r7, #0x20
+	beq	8b
+
+	/* disable dram clk */
+	ldr	r7, =BIT32(5)
+	orr	r7, r7, #BIT32(3)
+	str	r7, [r11, #DDRC_PWRCTL]
+
+	ldr	r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+	ldr	r7, [r11, #ANADIG_DIGPROG]
+	and	r7, r7, #0xff
+	cmp	r7, #0x11
+	bne	10f
+
+	/* TO 1.1 */
+	ldr	r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]
+	ldr	r7, =0x38000000
+	str	r7, [r11]
+
+	/* LPSR mode needs to use the TO1.0 flow since the IOMUX loses power */
+	ldr	r10, [r0, #PM_INFO_MX7_LPSR_V_OFF]
+	ldr	r7, [r10]
+	cmp	r7, #0x0
+	beq	11f
+10:
+	/* reset ddr_phy  */
+	ldr	r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+	ldr	r7, =0x0
+	str	r7, [r11, #ANADIG_SNVS_MISC_CTRL]
+
+	/* delay 7 us */
+	ldr	r6, =6000
+	wait_delay
+
+	ldr	r11, [r0, #PM_INFO_MX7_SRC_V_OFF]
+	ldr	r6, =0x1000
+	ldr	r7, [r11, r6]
+	orr	r7, r7, #0x1
+	str	r7, [r11, r6]
+11:
+	/* turn off ddr power */
+	ldr	r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+	ldr	r7, =(0x1 << 29)
+	str	r7, [r11, #ANADIG_SNVS_MISC_CTRL_SET]
+
+	ldr	r11, [r0, #PM_INFO_MX7_SRC_V_OFF]
+	ldr	r6, =0x1000
+	ldr	r7, [r11, r6]
+	orr	r7, r7, #0x1
+	str	r7, [r11, r6]
+
+	.endm
+
+	.macro ddr_exit_retention
+
+	cmp	r5, #0x0
+	ldreq	r1, [r0, #PM_INFO_MX7_ANATOP_V_OFF]
+	ldrne	r1, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
+	ldreq	r2, [r0, #PM_INFO_MX7_SRC_V_OFF]
+	ldrne	r2, [r0, #PM_INFO_MX7_SRC_P_OFF]
+	ldreq	r3, [r0, #PM_INFO_MX7_DDRC_V_OFF]
+	ldrne	r3, [r0, #PM_INFO_MX7_DDRC_P_OFF]
+	ldreq	r4, [r0, #PM_INFO_MX7_DDRC_PHY_V_OFF]
+	ldrne	r4, [r0, #PM_INFO_MX7_DDRC_PHY_P_OFF]
+	ldreq	r10, [r0, #PM_INFO_MX7_CCM_V_OFF]
+	ldrne	r10, [r0, #PM_INFO_MX7_CCM_P_OFF]
+	ldreq	r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]
+	ldrne	r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_P_OFF]
+
+	/* turn on ddr power */
+	ldr	r7, =BIT32(29)
+	str	r7, [r1, #ANADIG_SNVS_MISC_CTRL_CLR]
+
+	ldr	r6, =50
+	wait_delay
+
+	/* clear ddr_phy reset */
+	ldr	r6, =0x1000
+	ldr	r7, [r2, r6]
+	orr	r7, r7, #0x3
+	str	r7, [r2, r6]
+	ldr	r7, [r2, r6]
+	bic	r7, r7, #0x1
+	str	r7, [r2, r6]
+13:
+	ldr	r6, [r0, #PM_INFO_MX7_DDRC_REG_NUM_OFF]
+	ldr	r7, =PM_INFO_MX7_DDRC_REG_OFF
+	add	r7, r7, r0
+14:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	str	r9, [r3, r8]
+	subs	r6, r6, #0x1
+	bne	14b
+	ldr	r7, =0x20
+	str	r7, [r3, #DDRC_PWRCTL]
+	ldr	r7, =0x0
+	str	r7, [r3, #DDRC_DFIMISC]
+
+	/* do PHY, clear ddr_phy reset */
+	ldr	r6, =0x1000
+	ldr	r7, [r2, r6]
+	bic	r7, r7, #0x2
+	str	r7, [r2, r6]
+
+	ldr	r7, [r1, #ANADIG_DIGPROG]
+	and	r7, r7, #0xff
+	cmp	r7, #0x11
+	bne	12f
+
+	/*
+	 * TKT262940:
+	 * System hangs when RST is pressed while the DDR pads are
+	 * in retention mode; fixed on TO1.1
+	 */
+	ldr	r7, [r11]
+	bic	r7, r7, #BIT32(27)
+	str	r7, [r11]
+	ldr	r7, [r11]
+	bic	r7, r7, #BIT32(29)
+	str	r7, [r11]
+12:
+	ldr	r7, =BIT32(30)
+	str	r7, [r1, #ANADIG_SNVS_MISC_CTRL_SET]
+
+	/* need to delay ~5 ms */
+	ldr	r6, =0x100000
+	wait_delay
+
+	ldr	r6, [r0, #PM_INFO_MX7_DDRC_PHY_REG_NUM_OFF]
+	ldr	r7, =PM_INFO_MX7_DDRC_PHY_REG_OFF
+	add	r7, r7, r0
+
+15:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	str	r9, [r4, r8]
+	subs	r6, r6, #0x1
+	bne	15b
+
+	ldr	r7, =0x0
+	add	r9, r10, #0x4000
+	str	r7, [r9, #0x130]
+
+	ldr	r7, =0x170
+	orr	r7, r7, #0x8
+	str	r7, [r11, #0x20]
+
+	ldr	r7, =0x2
+	add	r9, r10, #0x4000
+	str	r7, [r9, #0x130]
+
+	ldr	r7, =0xf
+	str	r7, [r4, #DDRPHY_LP_CON0]
+
+	/* wait until self-refresh mode entered */
+16:
+	ldr	r7, [r3, #DDRC_STAT]
+	and	r7, r7, #0x3
+	cmp	r7, #0x3
+	bne	16b
+	ldr	r7, =0x0
+	str	r7, [r3, #DDRC_SWCTL]
+	ldr	r7, =0x1
+	str	r7, [r3, #DDRC_DFIMISC]
+	ldr	r7, =0x1
+	str	r7, [r3, #DDRC_SWCTL]
+17:
+	ldr	r7, [r3, #DDRC_SWSTAT]
+	and	r7, r7, #0x1
+	cmp	r7, #0x1
+	bne	17b
+18:
+	ldr	r7, [r3, #DDRC_STAT]
+	and	r7, r7, #0x20
+	cmp	r7, #0x20
+	bne	18b
+
+	/* let DDR out of self-refresh */
+	ldr	r7, =0x0
+	str	r7, [r3, #DDRC_PWRCTL]
+19:
+	ldr	r7, [r3, #DDRC_STAT]
+	and	r7, r7, #0x30
+	cmp	r7, #0x0
+	bne	19b
+
+20:
+	ldr	r7, [r3, #DDRC_STAT]
+	and	r7, r7, #0x3
+	cmp	r7, #0x1
+	bne	20b
+
+	/* enable port */
+	ldr	r7, =0x1
+	str	r7, [r3, #DDRC_PCTRL_0]
+
+	/* enable auto self-refresh */
+	ldr	r7, [r3, #DDRC_PWRCTL]
+	orr	r7, r7, #(1 << 0)
+	str	r7, [r3, #DDRC_PWRCTL]
+
+	.endm
+
+FUNC imx7_suspend, :
+	push	{r4-r12}
+
+	/* make sure SNVS clk is enabled */
+	ldr	r11, [r0, #PM_INFO_MX7_CCM_V_OFF]
+	add	r11, r11, #0x4000
+	ldr	r7, =0x3
+	str	r7, [r11, #CCM_SNVS_LPCG]
+
+	/* check whether it is a standby mode */
+	ldr	r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+	ldr	r7, [r11, #GPC_PGC_C0]
+	cmp	r7, #0
+	beq	ddr_only_self_refresh
+
+	/*
+	 * r0 maps to the same address in the original and IRAM translation
+	 * tables, so it needs no special handling here.
+	 */
+	ldr	r1, [r0, #PM_INFO_MX7_PBASE_OFF]
+	ldr	r4, [r0, #PM_INFO_MX7_SIZE_OFF]
+
+	/*
+	 * Compute the resume address in IRAM so it can be
+	 * programmed into the SRC register.
+	 */
+	ldr	r6, =imx7_suspend
+	ldr	r7, =resume
+	sub	r7, r7, r6
+	add	r8, r1, r4
+	add	r9, r8, r7
+
+	ldr	r11, [r0, #PM_INFO_MX7_SRC_V_OFF]
+	/* store physical resume addr and pm_info address. */
+	str	r9, [r11, #MX7_SRC_GPR1]
+	str	r1, [r11, #MX7_SRC_GPR2]
+
+	disable_l1_dcache
+
+	store_ttbr
+
+	ldr	r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+	ldr	r7, [r11, #GPC_PGC_FM]
+	cmp	r7, #0
+	beq	ddr_only_self_refresh
+
+	ddr_enter_retention
+	/* enter LPSR mode if resume addr is valid */
+	ldr	r11, [r0, #PM_INFO_MX7_LPSR_V_OFF]
+	ldr	r7, [r11]
+	cmp	r7, #0x0
+	beq	ddr_retention_enter_out
+
+	/* disable STOP mode before entering LPSR */
+	ldr	r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+	ldr	r7, [r11]
+	bic	r7, #0xf
+	str	r7, [r11]
+
+	/* shut down vddsoc to enter lpsr mode */
+	ldr	r11, [r0, #PM_INFO_MX7_SNVS_V_OFF]
+	ldr	r7, [r11, #0x38]
+	orr	r7, r7, #0x60
+	str	r7, [r11, #0x38]
+	dsb
+wait_shutdown:
+	wfi
+	b	wait_shutdown
+
+ddr_only_self_refresh:
+	ddrc_enter_self_refresh
+	b	wfi
+ddr_retention_enter_out:
+	ldr	r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
+	ldr	r7, =0x0
+	ldr	r8, =0x1000
+	str	r7, [r11, r8]
+
+	ldr	r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+	ldr	r4, [r11, #MX7D_GPC_IMR1]
+	ldr	r5, [r11, #MX7D_GPC_IMR2]
+	ldr	r6, [r11, #MX7D_GPC_IMR3]
+	ldr	r7, [r11, #MX7D_GPC_IMR4]
+
+	ldr	r8, =0xffffffff
+	str	r8, [r11, #MX7D_GPC_IMR1]
+	str	r8, [r11, #MX7D_GPC_IMR2]
+	str	r8, [r11, #MX7D_GPC_IMR3]
+	str	r8, [r11, #MX7D_GPC_IMR4]
+
+	/*
+	 * Enable the RBC bypass counter here
+	 * to hold off interrupts. RBC counter
+	 * = 8 (240 us). With this setting, the latency
+	 * from wakeup interrupt to ARM power-up
+	 * is ~250 us.
+	 */
+	ldr	r8, [r11, #0x14]
+	bic	r8, r8, #(0x3f << 24)
+	orr	r8, r8, #(0x8 << 24)
+	str	r8, [r11, #0x14]
+
+	/* enable the counter. */
+	ldr	r8, [r11, #0x14]
+	orr	r8, r8, #(0x1 << 30)
+	str	r8, [r11, #0x14]
+
+	/* unmask all the GPC interrupts. */
+	str	r4, [r11, #MX7D_GPC_IMR1]
+	str	r5, [r11, #MX7D_GPC_IMR2]
+	str	r6, [r11, #MX7D_GPC_IMR3]
+	str	r7, [r11, #MX7D_GPC_IMR4]
+
+	/*
+	 * Now delay for a short while (~3 us).
+	 * The ARM core runs at 1 GHz at this
+	 * point, so a short loop should be enough.
+	 * This delay is required to ensure that
+	 * the RBC counter can start counting in
+	 * case an interrupt is already pending,
+	 * or arrives just as the ARM core is
+	 * about to assert DSM_request.
+	 */
+	ldr	r7, =2000
+rbc_loop:
+	subs	r7, r7, #0x1
+	bne	rbc_loop
+wfi:
+	dsb
+	/* Enter stop mode */
+	wfi
+
+	mov	r5, #0x0
+
+	ldr	r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+	ldr	r7, [r11, #GPC_PGC_FM]
+	cmp	r7, #0
+	beq	wfi_ddr_self_refresh_out
+
+	ddr_exit_retention
+	b	wfi_ddr_retention_out
+wfi_ddr_self_refresh_out:
+	ddrc_exit_self_refresh
+wfi_ddr_retention_out:
+
+	/* check whether it is a standby mode */
+	ldr	r11, [r0, #PM_INFO_MX7_GPC_V_OFF]
+	ldr	r7, [r11, #GPC_PGC_C0]
+	cmp	r7, #0
+	beq	standby_out
+
+	ldr	r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
+	ldr	r7, =0x1
+	ldr	r8, =0x1000
+	str	r7, [r11, r8]
+
+	restore_ttbr
+standby_out:
+	pop	{r4-r12}
+	/* return to suspend finish */
+	bx	lr
+
+resume:
+	write_iciallu
+	write_bpiall
+	dsb
+	isb
+
+	mov     r6, #(SCTLR_I | SCTLR_Z)
+	write_sctlr r6
+	isb
+
+	/*
+	 * After resume, the ROM leaves the core in SVC mode,
+	 * so switch back to Monitor mode.
+	 */
+	cps	#CPSR_MODE_MON
+
+	/* get physical resume address from pm_info. */
+	ldr	lr, [r0, #PM_INFO_MX7_RESUME_ADDR_OFF]
+	/* clear core0's entry and parameter */
+	ldr	r11, [r0, #PM_INFO_MX7_SRC_P_OFF]
+	mov	r7, #0x0
+	str	r7, [r11, #MX7_SRC_GPR1]
+	str	r7, [r11, #MX7_SRC_GPR2]
+
+	mov	r5, #0x1
+
+	ldr	r11, [r0, #PM_INFO_MX7_GPC_P_OFF]
+	ldr	r7, [r11, #GPC_PGC_FM]
+	cmp	r7, #0
+	beq	dsm_ddr_self_refresh_out
+
+	ddr_exit_retention
+	b	dsm_ddr_retention_out
+dsm_ddr_self_refresh_out:
+	ddrc_exit_self_refresh
+dsm_ddr_retention_out:
+
+	bx	lr
+END_FUNC imx7_suspend
+
+FUNC ca7_cpu_resume, :
+	mov     r0, #0	@ ; write the cache size selection register to be
+	write_csselr r0	@ ; sure we address the data cache
+	isb		@ ; isb to sync the change to the cachesizeid reg
+
+_inv_dcache_off:
+	mov     r0, #0	@ ; set way number to 0
+_inv_nextway:
+	mov     r1, #0	@ ; set line number (=index) to 0
+_inv_nextline:
+	orr     r2, r0, r1	@ ; construct way/index value
+	write_dcisw r2	@ ; invalidate data or unified cache line by set/way
+	add     r1, r1, #1 << LINE_FIELD_OFFSET	@ ; increment the index
+	cmp     r1, #1 << LINE_FIELD_OVERFLOW	@ ; overflow out of set field?
+	bne     _inv_nextline
+	add     r0, r0, #1 << WAY_FIELD_OFFSET	@ ; increment the way number
+	cmp     r0, #0				@ ; overflow out of way field?
+	bne     _inv_nextway
+
+	dsb					@ ; synchronise
+	isb
+
+	/*
+	 * No stack yet; r0-r3 may be clobbered.
+	 * TODO: use a platform-specific configuration rather than plat_xxx,
+	 * since plat_xxx may change in the future and cannot be relied on.
+	 * The stack pointer must be handled carefully.
+	 */
+	blx	plat_cpu_reset_early
+
+	b	sm_pm_cpu_resume
+END_FUNC ca7_cpu_resume
diff --git a/core/drivers/pm/imx/suspend/psci-suspend-imx7ulp.S b/core/drivers/pm/imx/suspend/psci-suspend-imx7ulp.S
new file mode 100644
index 000000000..db3201b6b
--- /dev/null
+++ b/core/drivers/pm/imx/suspend/psci-suspend-imx7ulp.S
@@ -0,0 +1,597 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <asm.S>
+#include <arm.h>
+#include <arm32_macros.S>
+#include <generated/imx_pm_asm_defines.h>
+#include <platform_config.h>
+#include <kernel/tz_ssvce_def.h>
+#include <kernel/tz_proc_def.h>
+#include <kernel/cache_helpers.h>
+
+#define DGO_CTRL0	0x50
+#define DGO_GPR3	0x60
+#define DGO_GPR4	0x64
+
+#define MX7ULP_MMDC_MISC	0x18
+#define MX7ULP_MMDC_MAPSR	0x404
+#define MX7ULP_MMDC_MPDGCTRL0	0x83c
+
+#define SCG_RCCR	0x14
+#define SCG_DDRCCR	0x30
+#define SCG_NICCCR	0x40
+#define SCG_FIRCDIV	0x304
+#define SCG_APLLCSR	0x500
+#define SCG_APLLDIV	0x504
+#define SCG_APLLCFG	0x508
+#define SCG_APLLPFD	0x50c
+#define SCG_APLLNUM	0x510
+#define SCG_APLLDENOM	0x514
+#define SCG_SPLLCSR	0x600
+#define SCG_SPLLDIV	0x604
+#define SCG_SPLLCFG	0x608
+#define SCG_SPLLPFD	0x60c
+#define SCG_SPLLNUM	0x610
+#define SCG_SPLLDENOM	0x614
+#define SCG_SOSCDIV	0x104
+
+#define PMC1_CTRL	0x24
+
+#define GPIO_PDOR		0x0
+#define GPIO_PDDR		0x14
+#define GPIO_PORT_NUM		0x4
+#define GPIO_PORT_OFF	0x40
+
+#define PMCTRL		0x10
+
+#define IOMUX_OFF		0x0
+#define SELECT_INPUT_OFF	0x200
+
+	.align 3
+
+	.macro store_ttbr1
+
+	/* Store TTBR1 to pm_info->ttbr1 */
+	read_ttbr1 r7
+	str	r7, [r0, #PM_INFO_MX7ULP_TTBR1_V_OFF]
+
+	/* Store TTBR0 to pm_info->ttbr0 */
+	read_ttbr0 r7
+	str	r7, [r0, #PM_INFO_MX7ULP_TTBR0_V_OFF]
+
+	/* Disable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	bic	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the BTAC. */
+	write_bpiallis
+
+	ldr	r6, =iram_tlb_phys_addr
+	ldr	r6, [r6]
+	dsb
+	isb
+
+	/* Store the IRAM table in TTBR */
+	write_ttbr1 r6
+	write_ttbr0 r6
+	/* Read TTBCR and set PD0=1 */
+	read_ttbcr r6
+	orr	r6, r6, #0x10
+	write_ttbcr r6
+
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+	isb
+	write_tlbiall
+	isb
+
+	.endm
+
+	.macro restore_ttbr1
+
+	/* Enable L1 data cache. */
+	read_sctlr r6
+	orr	r6, r6, #0x4
+	write_sctlr r6
+
+	dsb
+	isb
+
+	/* Restore TTBCR */
+	/* Read TTBCR and set PD0=0 */
+	read_ttbcr r6
+	bic	r6, r6, #0x10
+	write_ttbcr r6
+	dsb
+	isb
+
+	/* flush the TLB */
+	write_tlbiallis
+
+	/* Enable Branch Prediction, Z bit in SCTLR. */
+	read_sctlr r6
+	orr	r6, r6, #0x800
+	write_sctlr r6
+
+	/* Flush the Branch Target Address Cache (BTAC) */
+	write_bpiallis
+
+	/* Restore TTBR1, get the origin ttbr1 from pm info */
+	ldr	r7, [r0, #PM_INFO_MX7ULP_TTBR1_V_OFF]
+	write_ttbr1 r7
+	ldr	r7, [r0, #PM_INFO_MX7ULP_TTBR0_V_OFF]
+	write_ttbr0 r7
+
+	.endm
+
+	.macro	disable_l1_dcache
+
+	/*
+	 * Flush all data from the L1 data cache before clearing
+	 * the SCTLR.C bit.
+	 */
+	push	{r0 - r10, lr}
+	ldr	r1, =dcache_op_all
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+	/* disable d-cache */
+	mrc	p15, 0, r7, c1, c0, 0
+	bic	r7, r7, #(1 << 2)
+	mcr	p15, 0, r7, c1, c0, 0
+	dsb
+	isb
+
+	push	{r0 - r10, lr}
+	ldr	r1, =dcache_op_all
+	mov	r0, #DCACHE_OP_CLEAN_INV
+	mov	lr, pc
+	bx	r1
+	pop	{r0 - r10, lr}
+
+
+	.endm
+
+	.macro	restore_mmdc_settings
+
+	ldr	r10, =MMDC_IO_BASE
+	ldr	r11, =MMDC_BASE
+
+	/* resume mmdc iomuxc settings */
+	ldr	r6, [r0, #PM_INFO_MX7ULP_MMDC_IO_NUM_OFF]
+	ldr	r7, =PM_INFO_MX7ULP_MMDC_IO_VAL_OFF
+	add	r7, r7, r0
+11:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	str	r9, [r10, r8]
+	subs	r6, r6, #0x1
+	bne	11b
+
+	/* restore MMDC settings */
+	ldr	r6, [r0, #PM_INFO_MX7ULP_MMDC_NUM_OFF]
+	ldr	r7, =PM_INFO_MX7ULP_MMDC_VAL_OFF
+	add	r7, r7, r0
+1:
+	ldr	r8, [r7], #0x4
+	ldr	r9, [r7], #0x4
+	str	r9, [r11, r8]
+	subs	r6, r6, #0x1
+	bne	1b
+
+	/* let DDR enter self-refresh */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 20)
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+2:
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 24)
+	beq	2b
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 20)
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+3:
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 24)
+	bne	3b
+
+	/* kick off MMDC */
+	ldr	r4, =0x0
+	str	r4, [r11, #0x1c]
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 20)
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+4:
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 24)
+	bne	4b
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+
+	.endm
+
+FUNC imx7ulp_suspend, :
+	push	{r4-r12}
+
+	/*
+	 * r0 maps to the same address in the original and IRAM translation
+	 * tables, so it needs no special handling here.
+	 */
+	ldr	r1, [r0, #PM_INFO_MX7ULP_PBASE_OFF]
+	ldr	r2, [r0, #PM_INFO_MX7ULP_RESUME_ADDR_OFF]
+	ldr	r3, [r0, #PM_INFO_MX7ULP_SIZE_OFF]
+
+	/*
+	 * Compute the resume address in IRAM so it can be
+	 * programmed into the SRC register.
+	 */
+	ldr	r6, =imx7ulp_suspend
+	ldr	r7, =resume
+	sub	r7, r7, r6
+	add	r8, r1, r3
+	add	r9, r8, r7
+
+	ldr	r11, [r0, #PM_INFO_MX7ULP_SIM_VBASE_OFF]
+	/* store physical resume addr and pm_info address. */
+	str	r9, [r11, #DGO_GPR3]
+	str	r1, [r11, #DGO_GPR4]
+	ldr	r7, [r11, #DGO_CTRL0]
+	orr	r7, r7, #0xc
+	str	r7, [r11, #DGO_CTRL0]
+wait_dgo:
+	ldr	r7, [r11, #DGO_CTRL0]
+	and	r7, r7, #0x18000
+	cmp	r7, #0x18000
+	bne	wait_dgo
+
+	ldr	r7, [r11, #DGO_CTRL0]
+	orr	r7, r7, #0x18000
+	bic	r7, r7, #0xc
+	str	r7, [r11, #DGO_CTRL0]
+
+	disable_l1_dcache
+
+	store_ttbr1
+
+	ldr	r11, [r0, #PM_INFO_MX7ULP_MMDC_VBASE_OFF]
+
+	/*
+	 * Disable automatic power saving, then put the DDR
+	 * explicitly into self-refresh.
+	 */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	orr	r7, r7, #0x1
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+
+	/* make the DDR explicitly enter self-refresh. */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	orr	r7, r7, #(1 << 20)
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+
+poll_dvfs_set:
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 24)
+	beq	poll_dvfs_set
+
+	/* switch NIC clock to FIRC */
+	ldr	r10, [r0, #PM_INFO_MX7ULP_SCG1_VBASE_OFF]
+	ldr	r7, [r10, #SCG_NICCCR]
+	bic	r7, #(1 << 28)
+	str	r7, [r10, #SCG_NICCCR]
+
+	/* switch RUN clock to FIRC */
+	ldr	r7, [r10, #SCG_RCCR]
+	bic	r7, #(0xf << 24)
+	orr	r7, #(0x3 << 24)
+	str	r7, [r10, #SCG_RCCR]
+
+	/* turn off SPLL and SPFD */
+	ldr	r7, [r10, #SCG_SPLLPFD]
+	mov	r8, r7
+	orr	r7, r7, #(0x1 << 31)
+	orr	r7, r7, #(0x1 << 23)
+	orr	r7, r7, #(0x1 << 15)
+	orr	r7, r7, #(0x1 << 7)
+	str	r7, [r10, #SCG_SPLLPFD]
+
+	ldr	r7, [r10, #SCG_SPLLCSR]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #SCG_SPLLCSR]
+
+	/* turn off APLL and APFD */
+	ldr	r7, [r10, #SCG_APLLPFD]
+	mov	r9, r7
+	orr	r7, r7, #(0x1 << 31)
+	orr	r7, r7, #(0x1 << 23)
+	orr	r7, r7, #(0x1 << 15)
+	orr	r7, r7, #(0x1 << 7)
+	str	r7, [r10, #SCG_APLLPFD]
+
+	ldr	r7, [r10, #SCG_APLLCSR]
+	bic	r7, r7, #0x1
+	str	r7, [r10, #SCG_APLLCSR]
+
+	/* Zzz, enter stop mode */
+	wfi
+	nop
+	nop
+	nop
+	nop
+
+	/* clear core0's entry and parameter */
+	ldr	r10, [r0, #PM_INFO_MX7ULP_SIM_VBASE_OFF]
+	mov	r7, #0x0
+	str	r7, [r10, #DGO_GPR3]
+	str	r7, [r10, #DGO_GPR4]
+
+	/* enable SPLL and SPFD */
+	ldr	r10, [r0, #PM_INFO_MX7ULP_SCG1_VBASE_OFF]
+	ldr	r7, [r10, #SCG_SPLLCSR]
+	orr	r7, r7, #1
+	str	r7, [r10, #SCG_SPLLCSR]
+wait_spll:
+	ldr	r7, [r10, #SCG_SPLLCSR]
+	ands	r7, r7, #(1 << 24)
+	beq	wait_spll
+
+	str	r8, [r10, #SCG_SPLLPFD]
+	/* switch RUN clock to SPLL */
+	ldr	r7, [r10, #SCG_RCCR]
+	bic	r7, #(0xf << 24)
+	orr	r7, #(0x6 << 24)
+	str	r7, [r10, #SCG_RCCR]
+
+	/* enable APLL and APFD */
+	ldr	r7, [r10, #SCG_APLLCSR]
+	orr	r7, r7, #1
+	str	r7, [r10, #SCG_APLLCSR]
+wait_apll:
+	ldr	r7, [r10, #SCG_APLLCSR]
+	ands	r7, r7, #(1 << 24)
+	beq	wait_apll
+
+	str	r9, [r10, #SCG_APLLPFD]
+
+	/* switch NIC clock to DDR */
+	ldr	r7, [r10, #SCG_NICCCR]
+	orr	r7, #(1 << 28)
+	str	r7, [r10, #SCG_NICCCR]
+
+	/* let DDR out of self-refresh */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	bic	r7, r7, #(1 << 20)
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+poll_dvfs_clear:
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	ands	r7, r7, #(1 << 24)
+	bne	poll_dvfs_clear
+
+	/* enable DDR auto power saving */
+	ldr	r7, [r11, #MX7ULP_MMDC_MAPSR]
+	bic	r7, r7, #0x1
+	str	r7, [r11, #MX7ULP_MMDC_MAPSR]
+
+	restore_ttbr1
+	pop	{r4-r12}
+	/* return to suspend finish */
+	bx	lr
+
+resume:
+	/* invalidate L1 I-cache first */
+	mov     r6, #0x0
+	mcr     p15, 0, r6, c7, c5, 0
+	mcr     p15, 0, r6, c7, c5, 6
+	/* enable the Icache and branch prediction */
+	mov     r6, #0x1800
+	mcr     p15, 0, r6, c1, c0, 0
+	isb
+
+	/* monitor mode */
+	mov	r3, #0x16
+	mov	r4, #((1 << 6) | (1 << 7))
+	orr	r3, r3, r4
+	msr	cpsr, r3
+	nop
+	nop
+	nop
+
+	ldr	r6, =SIM_BASE
+	ldr	r0, [r6, #DGO_GPR4]
+	/* get physical resume address from pm_info. */
+	ldr	lr, [r0, #PM_INFO_MX7ULP_RESUME_ADDR_OFF]
+
+	ldr	r11, =SCG1_BASE
+	/* enable spll and pfd0 */
+	ldr	r5, =PM_INFO_MX7ULP_SCG1_VAL_OFF
+	add	r6, r5, #48
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_SPLLCFG]
+
+	add	r6, r5, #56
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_SPLLNUM]
+
+	add	r6, r5, #60
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_SPLLDENOM]
+
+	add	r6, r5, #40
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_SPLLCSR]
+5:
+	ldr	r7, [r11, #SCG_SPLLCSR]
+	ands	r7, r7, #0x1000000
+	beq	5b
+
+	add	r6, r5, #44
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_SPLLDIV]
+
+	add	r6, r5, #52
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_SPLLPFD]
+
+	add	r6, r5, #0
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_RCCR]
+
+	/* enable apll and pfd0 */
+	add	r6, r5, #24
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_APLLCFG]
+
+	add	r6, r5, #32
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_APLLNUM]
+
+	add	r6, r5, #36
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_APLLDENOM]
+
+	add	r6, r5, #16
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_APLLCSR]
+6:
+	ldr	r7, [r11, #SCG_APLLCSR]
+	ands	r7, r7, #0x1000000
+	beq	6b
+
+	add	r6, r5, #20
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_APLLDIV]
+
+	add	r6, r5, #28
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_APLLPFD]
+
+	/* set ddr ccr */
+	add	r6, r5, #4
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_DDRCCR]
+
+	/* set nic sel */
+	add	r6, r5, #8
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_NICCCR]
+
+	/* set firc div2 to get 48MHz */
+	add	r6, r5, #12
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_FIRCDIV]
+
+	/* restore system OSC div */
+	add	r6, r5, #64
+	ldr	r7, [r0, r6]
+	str	r7, [r11, #SCG_SOSCDIV]
+
+	/* enable mmdc clock in pcc3 */
+	ldr	r11, =PCC3_BASE
+	ldr	r7, [r11, #0xac]
+	orr	r7, r7, #(1 << 30)
+	str	r7, [r11, #0xac]
+
+	/* enable GPIO clock in pcc2 */
+	ldr	r11, =PCC2_BASE
+	ldr	r7, [r11, #0x3c]
+	orr	r7, r7, #(1 << 30)
+	str	r7, [r11, #0x3c]
+
+	/* restore gpio settings */
+	ldr	r10, =GPIOC_BASE
+	ldr	r7, =PM_INFO_MX7ULP_GPIO_REG_OFF
+	add	r7, r7, r0
+	ldr	r6, =GPIO_PORT_NUM
+12:
+	ldr	r9, [r7], #0x4
+	str	r9, [r10, #GPIO_PDOR]
+	ldr	r9, [r7], #0x4
+	str	r9, [r10, #GPIO_PDDR]
+	add     r10, r10, #GPIO_PORT_OFF
+	subs	r6, r6, #0x1
+	bne	12b
+
+	/* restore iomuxc settings */
+	ldr	r10, =IOMUXC1_BASE
+	add	r10, r10, #IOMUX_OFF
+	ldr	r6, [r0, #PM_INFO_MX7ULP_IOMUX_NUM_OFF]
+	ldr	r7, =PM_INFO_MX7ULP_IOMUX_VAL_OFF
+	add	r7, r7, r0
+13:
+	ldr	r9, [r7], #0x4
+	str	r9, [r10], #0x4
+	subs	r6, r6, #0x1
+	bne	13b
+
+	/* restore select input settings */
+	ldr	r10, =IOMUXC1_BASE
+	add	r10, r10, #SELECT_INPUT_OFF
+	ldr	r6, [r0, #PM_INFO_MX7ULP_SELECT_INPUT_NUM_OFF]
+	ldr	r7, =PM_INFO_MX7ULP_SELECT_INPUT_VAL_OFF
+	add	r7, r7, r0
+14:
+	ldr	r9, [r7], #0x4
+	str	r9, [r10], #0x4
+	subs	r6, r6, #0x1
+	bne	14b
+
+	/* isoack */
+	ldr	r6, =PMC1_BASE
+	ldr	r7, [r6, #PMC1_CTRL]
+	orr	r7, r7, #(1 << 14)
+	str	r7, [r6, #PMC1_CTRL]
+
+	restore_mmdc_settings
+
+	mov	pc, lr
+END_FUNC imx7ulp_suspend
+
+FUNC imx7ulp_cpu_resume, :
+	mov	r0, #0
+	mcr     p15, 2, r0, c0, c0, 0
+	isb
+
+_inv_dcache_off:
+	mov	r0, #0
+_inv_nextWay:
+	mov	r1, #0
+_inv_nextLine:
+	orr     r2, r0, r1
+	mcr     p15, 0, r2, c7, c6, 2
+	add     r1, r1, #1 << LINE_FIELD_OFFSET
+	cmp     r1, #1 << LINE_FIELD_OVERFLOW
+	bne	_inv_nextLine
+	add	r0, r0, #1 << WAY_FIELD_OFFSET
+	cmp	r0, #0
+	bne	_inv_nextWay
+
+	dsb
+	nop
+	nop
+
+	/*
+	 * No stack yet; r0-r3 may be clobbered.
+	 * TODO: use a platform-specific configuration rather than plat_xxx,
+	 * since plat_xxx may change in the future and cannot be relied on.
+	 * The stack pointer must be handled carefully.
+	 */
+	blx	plat_cpu_reset_early
+
+	b	sm_pm_cpu_resume
+END_FUNC imx7ulp_cpu_resume
diff --git a/core/drivers/pm/imx/suspend/sub.mk b/core/drivers/pm/imx/suspend/sub.mk
new file mode 100644
index 000000000..75eedbcc6
--- /dev/null
+++ b/core/drivers/pm/imx/suspend/sub.mk
@@ -0,0 +1,5 @@
+incdirs-y += ./..
+
+srcs-$(CFG_MX7) += psci-suspend-imx7.S imx7_suspend.c
+srcs-$(CFG_MX6) += imx6_suspend.c psci-suspend-imx6.S
+srcs-$(CFG_MX7ULP) += psci-suspend-imx7ulp.S imx7ulp_suspend.c
diff --git a/core/drivers/pm/sub.mk b/core/drivers/pm/sub.mk
index b52e45599..90efbd681 100644
--- a/core/drivers/pm/sub.mk
+++ b/core/drivers/pm/sub.mk
@@ -1,2 +1,2 @@
 subdirs-$(CFG_ATMEL_PM) += sam
-subdirs-$(CFG_IMX_PM) += imx
+subdirs-$(CFG_IMX_PM) += imx
\ No newline at end of file
diff --git a/core/drivers/sub.mk b/core/drivers/sub.mk
index 1538bd6b8..dab2a944d 100644
--- a/core/drivers/sub.mk
+++ b/core/drivers/sub.mk
@@ -16,6 +16,7 @@ srcs-$(CFG_IMX_UART) += imx_uart.c
 srcs-$(CFG_IMX_I2C) += imx_i2c.c
 srcs-$(CFG_IMX_LPUART) += imx_lpuart.c
 srcs-$(CFG_IMX_WDOG) += imx_wdog.c
+srcs-$(CFG_IMX_TRUSTED_ARM_CE) += imx_trusted_arm_ce.c
 srcs-$(CFG_SPRD_UART) += sprd_uart.c
 srcs-$(CFG_HI16XX_UART) += hi16xx_uart.c
 srcs-$(CFG_HI16XX_RNG) += hi16xx_rng.c
@@ -63,7 +64,6 @@ srcs-$(CFG_IMX_SCU) += imx_scu.c
 srcs-$(CFG_IMX_CSU) += imx_csu.c
 srcs-$(CFG_XIPHERA_TRNG) += xiphera_trng.c
 srcs-$(CFG_IMX_SC) += imx_sc_api.c
-srcs-$(CFG_IMX_ELE) += imx_ele.c
 srcs-$(CFG_ZYNQMP_CSU_PUF) += zynqmp_csu_puf.c
 srcs-$(CFG_ZYNQMP_CSUDMA) += zynqmp_csudma.c
 srcs-$(CFG_ZYNQMP_CSU_AES) += zynqmp_csu_aes.c
diff --git a/core/include/drivers/ele/ele.h b/core/include/drivers/ele/ele.h
new file mode 100644
index 000000000..29b4fec55
--- /dev/null
+++ b/core/include/drivers/ele/ele.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright NXP 2023
+ */
+
+#ifndef __ELE_H_
+#define __ELE_H_
+
+#include <drivers/imx_mu.h>
+#include <drivers/ele/utils_mem.h>
+#include <tee_api_types.h>
+#include <trace.h>
+
+/* Definitions for communication protocol */
+#define ELE_VERSION_HSM 0x07
+#define ELE_REQUEST_TAG 0x17
+#define ELE_RESPONSE_TAG 0xe1
+#define ELE_VERSION_BASELINE 0x06
+
+/* Definitions for Key Lifetime attribute */
+#define ELE_KEY_LIFETIME_VOLATILE	      0x00000000
+#define ELE_KEY_LIFETIME_PERSISTENT	      0x00000001
+#define ELE_KEY_LIFETIME_VOLATILE_PERMANENT   0x00000080
+#define ELE_KEY_LIFETIME_PERSISTENT_PERMANENT 0x00000081
+
+/* Definitions for Key Usage attribute */
+#define ELE_KEY_USAGE_EXPORT 0x00000001
+
+/* Key store information */
+#define ELE_KEY_STORE_AUTH_NONCE  0x1234
+#define ELE_KEY_STORE_MAX_UPDATES 100
+
+/* Key groups for grouping keys */
+#define ELE_KEY_GROUP_VOLATILE	 0
+#define ELE_KEY_GROUP_PERSISTENT 1
+
+/* Key Store and Key Gen Flags */
+#define IMX_ELE_FLAG_SYNC 0x80
+#define IMX_ELE_FLAG_MON_INC 0x20
+
+/* Key Lifecycle */
+#define ELE_KEY_LIFECYCLE_DEVICE 0x00
+#define ELE_KEY_LIFECYCLE_OPEN 0x01
+#define ELE_KEY_LIFECYCLE_CLOSED 0x02
+#define ELE_KEY_LIFECYCLE_CLOSED_LOCKED 0x04
+
+/*
+ * ELE response code
+ */
+struct response_code {
+	uint8_t status;
+	uint8_t rating;
+	uint16_t rating_extension;
+} __packed;
+
+static inline size_t size_msg(size_t cmd)
+{
+	size_t words = ROUNDUP(cmd, sizeof(uint32_t)) / sizeof(uint32_t);
+
+	/* Add the header size */
+	words = words + 1;
+
+	return words;
+}
+
+#define SIZE_MSG_32(_msg) size_msg(sizeof(_msg))
+
+/*
+ * Extract response codes from the given word
+ *
+ * @word 32 bits word MU response
+ */
+struct response_code get_response_code(uint32_t word);
+
+/*
+ * The CRC is the last word of the message
+ *
+ * msg: MU message to hash
+ */
+void update_crc(struct imx_mu_msg *msg);
+
+/*
+ * Open a session with EdgeLock Enclave. It returns a session handle.
+ *
+ * @session_handle EdgeLock Enclave session handle
+ */
+TEE_Result imx_ele_session_open(uint32_t *session_handle);
+
+/*
+ * Close a session with EdgeLock Enclave.
+ *
+ * @session_handle EdgeLock Enclave session handle
+ */
+TEE_Result imx_ele_session_close(uint32_t session_handle);
+/*
+ * Initiate a communication with the EdgeLock Enclave. It sends a message
+ * and expects an answer.
+ *
+ * @msg MU message
+ */
+TEE_Result imx_ele_call(struct imx_mu_msg *msg);
+TEE_Result imx_ele_get_global_session_handle(uint32_t *session_handle);
+
+#endif /* __ELE_H_ */
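
To make the message sizing above concrete, a small sketch of a caller (the command structure and its payload handling are placeholders; only the header.version/header.size fields visible in imx_mu.h are assumed): a 6-byte command rounds up to 2 payload words, plus one header word, so SIZE_MSG_32() yields 3.

#include <drivers/ele/ele.h>

struct example_cmd {
	uint32_t handle;
	uint16_t flags;
} __packed;

static TEE_Result example_ele_request(void)
{
	struct imx_mu_msg msg = { };

	msg.header.version = ELE_VERSION_HSM;
	msg.header.size = SIZE_MSG_32(struct example_cmd); /* 2 + 1 = 3 */

	/* Remaining header fields and payload words are filled in here */
	return imx_ele_call(&msg);
}
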
diff --git a/core/include/drivers/ele/key_mgmt.h b/core/include/drivers/ele/key_mgmt.h
new file mode 100644
index 000000000..e65ca59bd
--- /dev/null
+++ b/core/include/drivers/ele/key_mgmt.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright NXP 2023
+ */
+
+#ifndef __KEY_MGMT_H__
+#define __KEY_MGMT_H__
+
+#include <tee_api_types.h>
+
+/* Key Usage */
+#define ELE_KEY_USAGE_SIGN_MSG 0x00000400
+#define ELE_KEY_USAGE_VERIFY_MSG 0x00000800
+#define ELE_KEY_USAGE_SIGN_HASH 0x00001000
+#define ELE_KEY_USAGE_VERIFY_HASH 0x00002000
+#define ELE_KEY_USAGE_DERIVE 0x00004000
+
+/* ECC Key types */
+#define ELE_KEY_TYPE_ECC_KEY_PAIR_BRAINPOOL_R1 0x7130
+#define ELE_KEY_TYPE_ECC_PUB_KEY_BRAINPOOL_R1 0x4130
+#define ELE_KEY_TYPE_ECC_KEY_PAIR_BRAINPOOL_T1 0x7180
+#define ELE_KEY_TYPE_ECC_PUB_KEY_BRAINPOOL_T1 0x4180
+#define ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1 0x7112
+#define ELE_KEY_TYPE_ECC_PUB_KEY_SECP_R1 0x4112
+
+/*
+ * Open a Key Management session with EdgeLock Enclave.
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle
+ * @key_mgmt_handle: EdgeLock Enclave Key management handle
+ */
+TEE_Result imx_ele_key_mgmt_open(uint32_t key_store_handle,
+				 uint32_t *key_mgmt_handle);
+
+/*
+ * Close Key management with EdgeLock Enclave.
+ *
+ * @key_mgmt_handle: EdgeLock Enclave key management handle
+ */
+TEE_Result imx_ele_key_mgmt_close(uint32_t key_mgmt_handle);
+
+/*
+ * Generate a Key be it Asymmetric or Symmetric
+ *
+ * @key_mgmt_handle: EdgeLock Enclave key management handle
+ * @public_key_size: Size in bytes of the output buffer for the generated
+ *		     key. It must be 0 if a symmetric key is generated.
+ *		     If the size is non-zero, the EdgeLock Enclave will attempt
+ *		     to copy out the public key of an asymmetric key pair.
+ * @key_group: Indicates the generated key group.
+ * @sync: Whether to push persistent keys to the NVM (Non-Volatile Memory).
+ *        Without it, even if the key attribute is set as persistent
+ *        at key creation (generation, import), the key will not be
+ *        stored in the NVM.
+ * @mon_inc: Whether to increment the monotonic counter or not.
+ * @key_lifetime: Lifetime of the key (volatile or persistent)
+ * @key_usage: Defines the cryptographic operations the key may be used for.
+ * @key_type: Defines the key type
+ * @key_size: Key size in bits
+ * @permitted_algo: Defines the algorithms the key may be used with.
+ * @key_lifecycle: Defines in which device lifecycle the key is usable:
+ *		   OPEN, CLOSED, CLOSED and LOCKED
+ * @public_key_addr: For an asymmetric key, address to which the EdgeLock
+ *		     Enclave will copy the public key.
+ * @key_identifier: Identifier of the generated key
+ */
+TEE_Result imx_ele_generate_key(uint32_t key_mgmt_handle,
+				size_t public_key_size, uint16_t key_group,
+				bool sync, bool mon_inc, uint32_t key_lifetime,
+				uint32_t key_usage, uint16_t key_type,
+				size_t key_size, uint32_t permitted_algo,
+				uint32_t key_lifecycle,
+				uint8_t *public_key_addr,
+				uint32_t *key_identifier);
+
+/*
+ * Delete a Key
+ *
+ * @key_mgmt_handle: EdgeLock Enclave key management handle
+ * @key_identifier: Identifier of key to be deleted
+ * @sync: Whether to delete persistent keys from the NVM (Non-Volatile Memory).
+ * @mon_inc: Whether to increment the monotonic counter or not.
+ */
+TEE_Result imx_ele_delete_key(uint32_t key_mgmt_handle, uint32_t key_identifier,
+			      bool sync, bool mon_inc);
+
+#endif /* __KEY_MGMT_H__ */
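
A condensed usage sketch of the key-management flow, mirroring what the ELE test PTA later in this patch does (the group, lifetime, usage, algorithm and size values are only examples):

#include <drivers/ele/ele.h>
#include <drivers/ele/key_mgmt.h>
#include <drivers/ele/sign_verify.h>

static TEE_Result example_generate_and_delete(uint32_t key_store_handle)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	uint32_t key_mgmt_handle = 0;
	uint32_t key_id = 0;
	uint8_t pub[64] = { }; /* NIST P-256: two 32-byte coordinates */

	res = imx_ele_key_mgmt_open(key_store_handle, &key_mgmt_handle);
	if (res != TEE_SUCCESS)
		return res;

	res = imx_ele_generate_key(key_mgmt_handle, sizeof(pub),
				   ELE_KEY_GROUP_VOLATILE, false, false,
				   ELE_KEY_LIFETIME_VOLATILE,
				   ELE_KEY_USAGE_SIGN_HASH |
				   ELE_KEY_USAGE_VERIFY_HASH,
				   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1, 256,
				   ELE_ALGO_ECDSA_SHA256,
				   ELE_KEY_LIFECYCLE_DEVICE, pub, &key_id);
	if (res == TEE_SUCCESS)
		res = imx_ele_delete_key(key_mgmt_handle, key_id, false,
					 false);

	if (imx_ele_key_mgmt_close(key_mgmt_handle) != TEE_SUCCESS &&
	    res == TEE_SUCCESS)
		res = TEE_ERROR_GENERIC;

	return res;
}
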
diff --git a/core/include/drivers/ele/key_store.h b/core/include/drivers/ele/key_store.h
new file mode 100644
index 000000000..56c8d6e71
--- /dev/null
+++ b/core/include/drivers/ele/key_store.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+#ifndef __KEY_STORE_H_
+#define __KEY_STORE_H_
+
+#include <tee_api_types.h>
+
+/*
+ * Open a Keystore session with EdgeLock Enclave.
+ *
+ * @session_handle: EdgeLock Enclave session handle
+ * @key_store_id: User defined word identifying the key store
+ * @auth_nonce: Nonce used as authentication proof for accessing
+ *		the key store.
+ * @create: Whether to create the key store or load it.
+ * @mon_inc: Whether to increment the monotonic counter or not.
+ * @sync: Whether to push persistent keys to the NVM (Non-Volatile Memory).
+ *        Without it, even if the key attribute is set as persistent
+ *        at key creation (generation, import), the key will not be
+ *        stored in the NVM.
+ * @key_store_handle: EdgeLock Enclave Key store handle.
+ */
+TEE_Result imx_ele_key_store_open(uint32_t session_handle,
+				  uint32_t key_store_id, uint32_t auth_nonce,
+				  bool create, bool mon_inc, bool sync,
+				  uint32_t *key_store_handle);
+
+/*
+ * Close Key store with EdgeLock Enclave.
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle
+ */
+TEE_Result imx_ele_key_store_close(uint32_t key_store_handle);
+/*
+ * Get global Key store handle.
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle
+ */
+TEE_Result imx_ele_get_global_key_store_handle(uint32_t *key_store_handle);
+
+#endif /* __KEY_STORE_H_ */
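
The create flag is typically used in a load-then-create pattern, as in this sketch (the key store ID and nonce are arbitrary example values; the ELE test PTA below uses the same approach):

#include <drivers/ele/key_store.h>

#define EXAMPLE_KEY_STORE_ID	0x1234
#define EXAMPLE_AUTH_NONCE	0x1234

static TEE_Result example_open_or_create(uint32_t session_handle,
					 uint32_t *key_store_handle)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	/* Try to load an existing key store first */
	res = imx_ele_key_store_open(session_handle, EXAMPLE_KEY_STORE_ID,
				     EXAMPLE_AUTH_NONCE, false /* load */,
				     false, false, key_store_handle);
	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		res = imx_ele_key_store_open(session_handle,
					     EXAMPLE_KEY_STORE_ID,
					     EXAMPLE_AUTH_NONCE,
					     true /* create */, false, false,
					     key_store_handle);
	return res;
}
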
diff --git a/core/include/drivers/ele/sign_verify.h b/core/include/drivers/ele/sign_verify.h
new file mode 100644
index 000000000..b913ae851
--- /dev/null
+++ b/core/include/drivers/ele/sign_verify.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+#ifndef __SIGN_VERIFY_H__
+#define __SIGN_VERIFY_H__
+
+#include <stddef.h>
+#include <stdint.h>
+#include <tee_api_types.h>
+#include <drivers/ele/utils_mem.h>
+
+/* Key permitted algorithms */
+#define ELE_ALGO_ECDSA_SHA224 0x06000608
+#define ELE_ALGO_ECDSA_SHA256 0x06000609
+#define ELE_ALGO_ECDSA_SHA384 0x0600060A
+#define ELE_ALGO_ECDSA_SHA512 0x0600060B
+#define ELE_ALGO_ECDSA_ANY 0x06000600
+#define ELE_ALGO_ECDSA_NOT_SUPPORTED 0x12345678
+
+/* Signature generation message type */
+#define ELE_SIG_GEN_MSG_TYPE_MESSAGE 0x1
+#define ELE_SIG_GEN_MSG_TYPE_DIGEST  0x0
+
+#define ELE_SIG_VERIFICATION_SUCCESS 0x5A3CC3A5
+#define ELE_SIG_VERIFICATION_FAILURE 0x2B4DD4B2
+
+/*
+ * Open a Signature Generation Flow
+ *
+ * @key_store_handle: EdgeLock Enclave key store handle
+ * @sig_gen_handle: Signature Generation handle returned by ELE
+ */
+TEE_Result imx_ele_sig_gen_open(uint32_t key_store_handle,
+				uint32_t *sig_gen_handle);
+
+/*
+ * Close a signature generation flow
+ *
+ * @sig_gen_handle: signature generation handle to be closed
+ */
+TEE_Result imx_ele_sig_gen_close(uint32_t sig_gen_handle);
+
+/*
+ * Signature generate operation
+ *
+ * @sig_gen_handle: edgelock enclave signature generation handle
+ * @key_identifier: identifier of key to be used for operation
+ * @message: data on which signature will be generated
+ * @message_size: message size
+ * @signature: generated signature
+ * @signature_size: signature size
+ * @signature_scheme: signature scheme to be used for signature generation
+ * @message_type: whether the passed message is a digest or the actual message
+ *		  (ELE_SIG_GEN_MSG_TYPE_DIGEST/ELE_SIG_GEN_MSG_TYPE_MESSAGE)
+ */
+TEE_Result imx_ele_signature_generate(uint32_t sig_gen_handle,
+				      uint32_t key_identifier,
+				      const uint8_t *message,
+				      size_t message_size, uint8_t *signature,
+				      size_t signature_size,
+				      uint32_t signature_scheme,
+				      uint8_t message_type);
+
+/*
+ * Open a signature verification flow
+ *
+ * @session_handle: edgelock enclave session handle
+ * @sig_verify_handle: signature verification handle returned by ele
+ */
+TEE_Result imx_ele_sig_verify_open(uint32_t session_handle,
+				   uint32_t *sig_verify_handle);
+
+/*
+ * Close a signature verification flow
+ *
+ * @sig_verify_handle: signature verification handle to be closed
+ */
+TEE_Result imx_ele_sig_verify_close(uint32_t sig_verify_handle);
+
+/*
+ * Signature verification operation
+ *
+ * @sig_verify_handle: edgelock enclave signature verification handle
+ * @key: public key to be used for operation
+ * @message: data on which signature was generated
+ * @message_size: message size
+ * @signature: generated signature
+ * @signature_size: signature size
+ * @key_size: key size
+ * @key_security_size: key security size
+ * @key_type: key type
+ * @signature_scheme: signature scheme to be used for signature generation
+ * @message_type: whether the passed message is a digest or the actual message
+ *		  (ELE_SIG_GEN_MSG_TYPE_DIGEST/ELE_SIG_GEN_MSG_TYPE_MESSAGE)
+ */
+TEE_Result imx_ele_signature_verification(uint32_t sig_verify_handle,
+					  const uint8_t *key,
+					  const uint8_t *message,
+					  size_t message_size,
+					  const uint8_t *signature,
+					  size_t signature_size,
+					  size_t key_size,
+					  size_t key_security_size,
+					  uint16_t key_type,
+					  uint32_t signature_scheme,
+					  uint8_t message_type);
+
+#endif /* __SIGN_VERIFY_H__ */
diff --git a/core/include/drivers/ele/utils_mem.h b/core/include/drivers/ele/utils_mem.h
new file mode 100644
index 000000000..206e503b8
--- /dev/null
+++ b/core/include/drivers/ele/utils_mem.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ *
+ * Memory management utilities.
+ * Primitives to allocate and free memory.
+ */
+
+#ifndef __UTILS_MEM_H__
+#define __UTILS_MEM_H__
+
+#include <kernel/cache_helpers.h>
+#include <stddef.h>
+#include <tee_api_types.h>
+#include <tee/cache.h>
+
+/*
+ * Definition of an i.MX ELE buffer type
+ */
+struct imx_ele_buf {
+	uint8_t *data; /* Data buffer */
+	size_t size;   /* Number of bytes in the data buffer */
+	paddr_t paddr; /* Physical address of the buffer */
+	uint32_t paddr_msb; /* MSB of the physical address */
+	uint32_t paddr_lsb; /* LSB of the physical address */
+};
+
+/*
+ * Cache operation on IMX ELE buffer
+ *
+ * @op: Cache operation
+ * @ele_buf: Buffer on which cache operation to be performed
+ */
+void imx_ele_buf_cache_op(enum utee_cache_operation op,
+			  struct imx_ele_buf *ele_buf);
+/*
+ * Allocate a cache-aligned buffer, initialize it with zeros, copy data
+ * from @buf into the newly allocated buffer and flush it from the cache.
+ *
+ * @ele_buf: Allocated buffer
+ * @buf: If non-NULL, its contents are copied into the newly allocated
+ *       buffer. Otherwise it is ignored.
+ * @size: Size in bytes of the memory to allocate.
+ */
+TEE_Result imx_ele_buf_alloc(struct imx_ele_buf *ele_buf, const uint8_t *buf,
+			     size_t size);
+
+/*
+ * Free buffer allocated memory
+ *
+ * @ele_buf:  Buffer to free
+ */
+void imx_ele_buf_free(struct imx_ele_buf *ele_buf);
+
+/*
+ * Copy data from @ele_buf to @buf
+ *
+ * @ele_buf: Buffer the data is copied from
+ * @buf: Buffer the data is copied to
+ * @size: Size of @buf
+ */
+TEE_Result imx_ele_buf_copy(struct imx_ele_buf *ele_buf, uint8_t *buf,
+			    size_t size);
+
+#endif /* __UTILS_MEM_H__ */
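
A sketch of the intended allocate/share/read-back pattern for a buffer passed to the ELE by physical address (how paddr_msb/paddr_lsb end up in the MU message is left out):

#include <drivers/ele/utils_mem.h>

static TEE_Result example_share_with_ele(const uint8_t *in, uint8_t *out,
					 size_t size)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct imx_ele_buf ebuf = { };

	/* Allocate a cache-aligned copy of @in and flush it */
	res = imx_ele_buf_alloc(&ebuf, in, size);
	if (res != TEE_SUCCESS)
		return res;

	/*
	 * ebuf.paddr_msb/ebuf.paddr_lsb would be placed in the command
	 * here and the command sent with imx_ele_call(). Once the ELE
	 * has written its result back, invalidate the buffer and copy
	 * the data out.
	 */
	imx_ele_buf_cache_op(TEE_CACHEINVALIDATE, &ebuf);
	res = imx_ele_buf_copy(&ebuf, out, size);

	imx_ele_buf_free(&ebuf);
	return res;
}
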
diff --git a/core/include/drivers/ele_extension.h b/core/include/drivers/ele_extension.h
new file mode 100644
index 000000000..f5f3298a9
--- /dev/null
+++ b/core/include/drivers/ele_extension.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+#ifndef __ELE_EXTENSION_H__
+#define __ELE_EXTENSION_H__
+
+#include <tee_api_types.h>
+#include <types_ext.h>
+
+/* The i.MX UID is 64 bits long */
+#define IMX_UID_SIZE sizeof(uint64_t)
+
+/*
+ * Derive a subkey from the ELE unique key.
+ * Given the same input the same subkey is returned each time.
+ * @ctx:		Constant data to generate different subkey with
+ *			the same usage
+ * @ctx_size:		Length of constant data
+ * @key:		Generated subkey virtual address
+ *			that must map an aligned physical address
+ * @key_size:		Required size of the subkey in bytes, must be 16 or 32.
+ *
+ * Returns a subkey.
+ */
+TEE_Result imx_ele_derive_key(const uint8_t *ctx, size_t ctx_size, uint8_t *key,
+			      size_t key_size);
+
+/*
+ * Read ELE shadow register
+ *
+ * @bank     Fuse bank number
+ * @word     Fuse word number
+ * @val      Shadow register value
+ */
+TEE_Result imx_ocotp_read(unsigned int bank, unsigned int word, uint32_t *val);
+
+#endif /* __ELE_EXTENSION_H__ */
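
Two minimal usage sketches for these helpers (the context string and the fuse bank/word numbers are placeholders; ensuring the output buffer maps an aligned, contiguous physical address, as required above, is not shown):

#include <drivers/ele_extension.h>

static TEE_Result example_derive_subkey(uint8_t *key, size_t key_size)
{
	static const uint8_t ctx[] = "example-storage-subkey";

	/* Same context and size give the same subkey on every call */
	return imx_ele_derive_key(ctx, sizeof(ctx) - 1, key, key_size);
}

static TEE_Result example_read_shadow_fuse(uint32_t *val)
{
	/* Bank 3, word 0 is an arbitrary example location */
	return imx_ocotp_read(3, 0, val);
}
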
diff --git a/core/include/drivers/imx_mu.h b/core/include/drivers/imx_mu.h
index 61fc11180..0530233bd 100644
--- a/core/include/drivers/imx_mu.h
+++ b/core/include/drivers/imx_mu.h
@@ -16,7 +16,8 @@
 #define IMX_MU_MSG_SIZE	  17
 #define IMX_MU_NB_CHANNEL 4
 
-#if defined(CFG_MX8ULP) || defined(CFG_MX93) || defined(CFG_MX91)
+#if defined(CFG_MX8ULP) || defined(CFG_MX93) || defined(CFG_MX91) || \
+	defined(CFG_MX95)
 struct imx_mu_msg_header {
 	uint8_t version;
 	uint8_t size;
diff --git a/core/include/drivers/imx_scu.h b/core/include/drivers/imx_scu.h
new file mode 100644
index 000000000..8551bd484
--- /dev/null
+++ b/core/include/drivers/imx_scu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+#ifndef __DRIVERS_IMX_SCU_H
+#define __DRIVERS_IMX_SCU_H
+
+#include <tee_api_types.h>
+
+TEE_Result scu_init(void);
+
+#endif /* __DRIVERS_IMX_SCU_H */
diff --git a/core/include/drivers/imx_trusted_arm_ce.h b/core/include/drivers/imx_trusted_arm_ce.h
new file mode 100644
index 000000000..41b5cc4fb
--- /dev/null
+++ b/core/include/drivers/imx_trusted_arm_ce.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+
+#ifndef _IMX_TRUSTED_ARM_CE_H
+#define _IMX_TRUSTED_ARM_CE_H
+
+#include <kernel/thread_arch.h>
+#include <sm/optee_smc.h>
+#include <tee_api_types.h>
+
+/*
+ * Do AES CBC Encryption
+ *
+ * Call register usage:
+ * a0	SMC Function ID, IMX_SMC_ENCRYPT_CBC
+ * a1	Key ids
+ * a2	Initial vector physical address
+ * a3	Input buffer physical address
+ * a4	Input buffer length
+ * a5	Output buffer physical address
+ * a6	Output buffer length
+ * a7	Not used
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1-3	Not used
+ * a4-7	Preserved
+ *
+ * OPTEE_SMC_RETURN_EBADCMD on Invalid input offset:
+ * a0	OPTEE_SMC_RETURN_EBADCMD
+ * a1-3	Not used
+ * a4-7	Preserved
+ */
+#define IMX_SMC_FUNCID_ENCRYPT_CBC U(20)
+#define IMX_SMC_ENCRYPT_CBC OPTEE_SMC_FAST_CALL_VAL(IMX_SMC_FUNCID_ENCRYPT_CBC)
+
+/*
+ * Do AES CBC Decryption
+ *
+ * Call register usage:
+ * a0	SMC Function ID, IMX_SMC_DECRYPT_CBC
+ * a1	Key ids
+ * a2	Initial vector physical address
+ * a3	Input buffer physical address
+ * a4	Input buffer length
+ * a5	Output buffer physical address
+ * a6	Output buffer length
+ * a7	Not used
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1-3	Not used
+ * a4-7	Preserved
+ *
+ * OPTEE_SMC_RETURN_EBADCMD on Invalid input offset:
+ * a0	OPTEE_SMC_RETURN_EBADCMD
+ * a1-3	Not used
+ * a4-7	Preserved
+ */
+#define IMX_SMC_FUNCID_DECRYPT_CBC U(21)
+#define IMX_SMC_DECRYPT_CBC OPTEE_SMC_FAST_CALL_VAL(IMX_SMC_FUNCID_DECRYPT_CBC)
+
+/*
+ * Do AES XTS Encryption
+ *
+ * Call register usage:
+ * a0	SMC Function ID, IMX_SMC_ENCRYPT_XTS
+ * a1	Key ids
+ * a2	Initial vector physical address
+ * a3	Input buffer physical address
+ * a4	Input buffer length
+ * a5	Output buffer physical address
+ * a6	Output buffer length
+ * a7	Not used
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1-3	Not used
+ * a4-7	Preserved
+ *
+ * OPTEE_SMC_RETURN_EBADCMD on Invalid input offset:
+ * a0	OPTEE_SMC_RETURN_EBADCMD
+ * a1-3	Not used
+ * a4-7	Preserved
+ */
+#define IMX_SMC_FUNCID_ENCRYPT_XTS U(22)
+#define IMX_SMC_ENCRYPT_XTS OPTEE_SMC_FAST_CALL_VAL(IMX_SMC_FUNCID_ENCRYPT_XTS)
+
+/*
+ * Do AES XTS Decryption
+ *
+ * Call register usage:
+ * a0	SMC Function ID, IMX_SMC_DECRYPT_XTS
+ * a1	Key ids
+ * a2	Initial vector physical address
+ * a3	Input buffer physical address
+ * a4	Input buffer length
+ * a5	Output buffer physical address
+ * a6	Output buffer length
+ * a7	Not used
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1-3	Not used
+ * a4-7	Preserved
+ *
+ * OPTEE_SMC_RETURN_EBADCMD on Invalid input offset:
+ * a0	OPTEE_SMC_RETURN_EBADCMD
+ * a1-3	Not used
+ * a4-7	Preserved
+ */
+#define IMX_SMC_FUNCID_DECRYPT_XTS U(23)
+#define IMX_SMC_DECRYPT_XTS OPTEE_SMC_FAST_CALL_VAL(IMX_SMC_FUNCID_DECRYPT_XTS)
+
+/*
+ * Trusted Arm CE AES-CBC fast SMC call
+ *
+ * @args: SMC call arguments
+ * @encrypt: true for encryption, false otherwise
+ */
+TEE_Result imx_smc_cipher_cbc(struct thread_smc_args *args, bool encrypt);
+
+/*
+ * Trusted Arm CE AES-XTS fast SMC call
+ *
+ * @args: SMC call arguments
+ * @encrypt: true for encryption, false otherwise
+ */
+TEE_Result imx_smc_cipher_xts(struct thread_smc_args *args, bool encrypt);
+
+#endif /* _IMX_TRUSTED_ARM_CE_H */
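
For orientation, a sketch of how a platform fast-SMC dispatcher could route these function IDs to the two helpers (the handler shape, and the assumption that the helpers do not update a0 themselves, are illustrative; only the register layout documented above is relied on):

#include <drivers/imx_trusted_arm_ce.h>
#include <sm/optee_smc.h>

static void example_imx_fast_smc(struct thread_smc_args *args)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	switch (OPTEE_SMC_FUNC_NUM(args->a0)) {
	case IMX_SMC_FUNCID_ENCRYPT_CBC:
		res = imx_smc_cipher_cbc(args, true /* encrypt */);
		break;
	case IMX_SMC_FUNCID_DECRYPT_CBC:
		res = imx_smc_cipher_cbc(args, false);
		break;
	case IMX_SMC_FUNCID_ENCRYPT_XTS:
		res = imx_smc_cipher_xts(args, true);
		break;
	case IMX_SMC_FUNCID_DECRYPT_XTS:
		res = imx_smc_cipher_xts(args, false);
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		return;
	}

	args->a0 = res ? OPTEE_SMC_RETURN_EBADCMD : OPTEE_SMC_RETURN_OK;
}
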
diff --git a/core/include/drivers/pm/imx/busfreq.h b/core/include/drivers/pm/imx/busfreq.h
new file mode 100644
index 000000000..1bad5b020
--- /dev/null
+++ b/core/include/drivers/pm/imx/busfreq.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/**
+ * @copyright 2018 NXP
+ *
+ * @file    busfreq.h
+ *
+ * @brief   Busfreq interface.
+ */
+#ifndef __BUSFREQ_H__
+#define __BUSFREQ_H__
+
+#include <tee_api_types.h>
+
+TEE_Result busfreq_change(uint32_t freq, uint32_t dll_off);
+
+#endif /* __BUSFREQ_H__ */
diff --git a/core/include/kernel/dt.h b/core/include/kernel/dt.h
index dc3bb7adb..eea4a9714 100644
--- a/core/include/kernel/dt.h
+++ b/core/include/kernel/dt.h
@@ -324,6 +324,9 @@ void reinit_manifest_dt(void);
 /* Returns TOS_FW_CONFIG DTB or SP manifest DTB if present, otherwise NULL */
 void *get_manifest_dt(void);
 
+/* Disable the node at path @target via the DT overlay */
+int dt_overlay_disable_node(char *target);
+
 #else /* !CFG_DT */
 
 static inline const struct dt_driver *dt_find_compatible_driver(
diff --git a/core/include/tee/entry_std.h b/core/include/tee/entry_std.h
index d9ddb6200..35a1e4172 100644
--- a/core/include/tee/entry_std.h
+++ b/core/include/tee/entry_std.h
@@ -23,4 +23,7 @@ TEE_Result __tee_entry_std(struct optee_msg_arg *arg, uint32_t num_params);
 /* Get list head for sessions opened from non-secure */
 void nsec_sessions_list_head(struct tee_ta_session_head **open_sessions);
 
-#endif /* __TEE_ENTRY_STD_H */
+/* Retrieve SDP mem cacheability */
+bool tee_entry_is_sdp_cached(void);
+
+#endif /* __TEE_ENTRY_STD_H */
diff --git a/core/kernel/dt.c b/core/kernel/dt.c
index 98bdd396c..8af67fd5a 100644
--- a/core/kernel/dt.c
+++ b/core/kernel/dt.c
@@ -535,6 +535,42 @@ static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
 
 	return fdt_create_empty_tree(dt->blob, dt_size);
 }
+
+int dt_overlay_disable_node(char *target)
+{
+	char frag[32] = { };
+	int offs = 0;
+	int ret = 0;
+	struct dt_descriptor *dt = &external_dt;
+
+	offs = fdt_path_offset(dt->blob, "/");
+	if (offs < 0)
+		return offs;
+
+	ret = snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
+	if (ret < 0 || (size_t)ret >= sizeof(frag))
+		return -1;
+
+	offs = fdt_add_subnode(dt->blob, offs, frag);
+	if (offs < 0)
+		return offs;
+
+	dt->frag_id += 1;
+
+	ret = fdt_setprop_string(dt->blob, offs, "target-path", target);
+	if (ret < 0)
+		return ret;
+
+	offs = fdt_add_subnode(dt->blob, offs, "__overlay__");
+	if (offs < 0)
+		return offs;
+
+	offs = fdt_setprop_string(dt->blob, offs, "status", "disabled");
+	if (offs < 0)
+		return offs;
+
+	return 0;
+}
 #else
 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
 {
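
A caller-side sketch for the new overlay helper (the node path is a made-up placeholder; <kernel/dt.h> and <trace.h> provide the declaration and EMSG()):

static void example_hide_node_from_normal_world(void)
{
	/* Placeholder path: the real target is platform specific */
	char target[] = "/soc/bus@30800000/crypto@30900000";

	if (dt_overlay_disable_node(target))
		EMSG("Failed to disable %s in the DT overlay", target);
}
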
diff --git a/core/pta/imx/ele_test.c b/core/pta/imx/ele_test.c
new file mode 100644
index 000000000..860642b44
--- /dev/null
+++ b/core/pta/imx/ele_test.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+
+#include <drivers/ele/ele.h>
+#include <drivers/ele/key_store.h>
+#include <drivers/ele/key_mgmt.h>
+#include <drivers/ele/sign_verify.h>
+#include <kernel/pseudo_ta.h>
+#include <kernel/user_ta.h>
+#include <pta_ele_test.h>
+#include <stdint.h>
+#include <string.h>
+#include <tee_api_defines.h>
+#include <tee_api_types.h>
+
+#define PTA_NAME "ele_test.pta"
+
+#define PTA_ELE_KEY_STORE_ID 0x1234
+#define PTA_ELE_KEY_STORE_AUTH_NONCE 0x1234
+
+#define PTA_ELE_KEY_GROUP_VOLATILE 0x1
+#define PTA_ELE_KEY_GROUP_PERSISTENT 0x2
+
+#define PTA_ELE_ECC_KEY_USAGE                               \
+	(ELE_KEY_USAGE_SIGN_MSG | ELE_KEY_USAGE_SIGN_HASH | \
+	 ELE_KEY_USAGE_VERIFY_MSG | ELE_KEY_USAGE_VERIFY_HASH)
+
+#define GEN_KEY_TC(_sz, _psz, _key_gp, _key_type, _key_lt, _perm_alg, _sync)   \
+	{                                                                      \
+		.key_size = (_sz), .public_key_size = (_psz),                  \
+		.key_group = (_key_gp), .key_type = (_key_type),               \
+		.key_lifetime = (_key_lt), .permitted_algorithm = (_perm_alg), \
+		.sync = (_sync),                                               \
+	}
+
+struct gen_key_test_case {
+	size_t key_size;
+	size_t public_key_size;
+	uint32_t key_group;
+	uint32_t key_type;
+	uint32_t key_lifetime;
+	uint32_t permitted_algorithm;
+	uint32_t sync;
+};
+
+static const struct gen_key_test_case gen_key_tc[] = {
+	GEN_KEY_TC(224, 56, PTA_ELE_KEY_GROUP_VOLATILE,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1, ELE_KEY_LIFETIME_VOLATILE,
+		   ELE_ALGO_ECDSA_SHA224, 0),
+	GEN_KEY_TC(256, 64, PTA_ELE_KEY_GROUP_VOLATILE,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1, ELE_KEY_LIFETIME_VOLATILE,
+		   ELE_ALGO_ECDSA_SHA256, 0),
+	GEN_KEY_TC(384, 96, PTA_ELE_KEY_GROUP_VOLATILE,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1, ELE_KEY_LIFETIME_VOLATILE,
+		   ELE_ALGO_ECDSA_SHA384, 0),
+	GEN_KEY_TC(521, 132, PTA_ELE_KEY_GROUP_VOLATILE,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1, ELE_KEY_LIFETIME_VOLATILE,
+		   ELE_ALGO_ECDSA_SHA512, 0),
+	GEN_KEY_TC(224, 56, PTA_ELE_KEY_GROUP_PERSISTENT,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1,
+		   ELE_KEY_LIFETIME_PERSISTENT, ELE_ALGO_ECDSA_SHA224, 1),
+	GEN_KEY_TC(256, 64, PTA_ELE_KEY_GROUP_PERSISTENT,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1,
+		   ELE_KEY_LIFETIME_PERSISTENT, ELE_ALGO_ECDSA_SHA256, 1),
+	GEN_KEY_TC(384, 96, PTA_ELE_KEY_GROUP_PERSISTENT,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1,
+		   ELE_KEY_LIFETIME_PERSISTENT, ELE_ALGO_ECDSA_SHA384, 1),
+	GEN_KEY_TC(521, 132, PTA_ELE_KEY_GROUP_PERSISTENT,
+		   ELE_KEY_TYPE_ECC_KEY_PAIR_SECP_R1,
+		   ELE_KEY_LIFETIME_PERSISTENT, ELE_ALGO_ECDSA_SHA512, 1),
+};
+
+static TEE_Result get_key_store_handle(uint32_t session_handle,
+				       uint32_t *key_store_handle)
+{
+	uint32_t ele_key_store_handle = 0;
+	TEE_Result res = TEE_ERROR_GENERIC;
+
+	if (!key_store_handle)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	/*
+	 * Since the NVM manager is now enabled, first try to open the
+	 * key store, because the same key store may already have been
+	 * imported from the master blob.
+	 * If no key store with the same credentials exists,
+	 * create one.
+	 */
+	res = imx_ele_key_store_open(session_handle, PTA_ELE_KEY_STORE_ID,
+				     PTA_ELE_KEY_STORE_AUTH_NONCE, false, false,
+				     false, &ele_key_store_handle);
+	if (res == TEE_ERROR_ITEM_NOT_FOUND) {
+		res = imx_ele_key_store_open(session_handle,
+					     PTA_ELE_KEY_STORE_ID,
+					     PTA_ELE_KEY_STORE_AUTH_NONCE, true,
+					     false, false,
+					     &ele_key_store_handle);
+	}
+	if (res != TEE_SUCCESS)
+		return res;
+
+	*key_store_handle = ele_key_store_handle;
+	return res;
+}
+
+static TEE_Result ele_generate_delete(const struct gen_key_test_case *tc,
+				      uint32_t key_mgmt_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint8_t *public_key = NULL;
+	uint32_t key_identifier = 0;
+
+	public_key = calloc(1, tc->public_key_size);
+	if (!public_key) {
+		EMSG("Public key memory allocation failed");
+		res = TEE_ERROR_OUT_OF_MEMORY;
+		goto out;
+	}
+
+	res = imx_ele_generate_key(key_mgmt_handle, tc->public_key_size,
+				   tc->key_group, tc->sync, false,
+				   tc->key_lifetime, PTA_ELE_ECC_KEY_USAGE,
+				   tc->key_type, tc->key_size,
+				   tc->permitted_algorithm,
+				   ELE_KEY_LIFECYCLE_DEVICE, public_key,
+				   &key_identifier);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key generation failed");
+		goto out;
+	}
+
+	res = imx_ele_delete_key(key_mgmt_handle, key_identifier, tc->sync,
+				 false);
+	if (res != TEE_SUCCESS)
+		EMSG("Key deletion failed");
+
+out:
+	free(public_key);
+	return res;
+}
+
+static TEE_Result
+pta_ele_test_key_generate_delete(uint32_t param_types,
+				 TEE_Param params[TEE_NUM_PARAMS] __unused)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t session_handle = 0;
+	uint32_t key_store_handle = 0;
+	uint32_t key_mgmt_handle = 0;
+	unsigned int i = 0;
+	unsigned int error = 0;
+
+	uint32_t exp_param_types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE);
+
+	if (param_types != exp_param_types)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = imx_ele_session_open(&session_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Session open failed");
+		goto out;
+	}
+
+	res = get_key_store_handle(session_handle, &key_store_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key store open failed");
+		goto session_close;
+	}
+
+	res = imx_ele_key_mgmt_open(key_store_handle, &key_mgmt_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key management open failed");
+		goto key_store_close;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(gen_key_tc); i++) {
+		res = ele_generate_delete(&gen_key_tc[i], key_mgmt_handle);
+		if (res != TEE_SUCCESS) {
+			EMSG("ELE Generate/Delete failed");
+			error = 1;
+			break;
+		}
+	}
+
+	res = imx_ele_key_mgmt_close(key_mgmt_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Key Mgmt Close failed");
+
+key_store_close:
+	res = imx_ele_key_store_close(key_store_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Key Store Close failed");
+
+session_close:
+	res = imx_ele_session_close(session_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Session Close failed");
+
+out:
+	if (error)
+		res = TEE_ERROR_GENERIC;
+	return res;
+}
+
+/* Data for test */
+static const char test_data[] = "The quick brown fox jumps over the lazy dog";
+
+static TEE_Result ele_sign_verify(uint32_t session_handle,
+				  uint32_t key_store_handle,
+				  uint32_t key_identifier, uint8_t *public_key,
+				  size_t public_key_size, size_t key_size_bits,
+				  uint32_t key_type, uint32_t sig_scheme)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t sig_gen_handle = 0;
+	uint32_t sig_verify_handle = 0;
+	uint8_t *signature = NULL;
+	uint8_t *data = (uint8_t *)test_data;
+	unsigned int data_size = sizeof(test_data) - 1;
+
+	/*
+	 * The public key size and the signature size are the same for ECC keys
+	 */
+	size_t signature_size = public_key_size;
+
+	signature = calloc(1, signature_size);
+	if (!signature) {
+		EMSG("Signature memory allocation failed");
+		return TEE_ERROR_OUT_OF_MEMORY;
+	}
+
+	res = imx_ele_sig_gen_open(key_store_handle, &sig_gen_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature generation service flow open failed");
+		goto out;
+	}
+
+	res = imx_ele_signature_generate(sig_gen_handle, key_identifier, data,
+					 data_size, signature, signature_size,
+					 sig_scheme,
+					 ELE_SIG_GEN_MSG_TYPE_MESSAGE);
+	if (res != TEE_SUCCESS)
+		EMSG("Signature generation failed");
+
+	res |= imx_ele_sig_gen_close(sig_gen_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature generation flow close failed");
+		goto out;
+	}
+
+	res = imx_ele_sig_verify_open(session_handle, &sig_verify_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Signature verification service flow open failed");
+		goto out;
+	}
+
+	res = imx_ele_signature_verification(sig_verify_handle,
+					     public_key, data,
+					     data_size, signature,
+					     signature_size, public_key_size,
+					     key_size_bits, key_type,
+					     sig_scheme,
+					     ELE_SIG_GEN_MSG_TYPE_MESSAGE);
+	if (res != TEE_SUCCESS)
+		EMSG("Signature verification failed");
+
+	res |= imx_ele_sig_verify_close(sig_verify_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Signature verification flow close failed");
+
+out:
+	free(signature);
+	return res;
+}
+
+static TEE_Result ele_gen_del_sign_verify(const struct gen_key_test_case *tc,
+					  uint32_t key_mgmt_handle,
+					  uint32_t session_handle,
+					  uint32_t key_store_handle)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint8_t *public_key = NULL;
+	uint32_t key_identifier = 0;
+	unsigned int error = 0;
+
+	public_key = calloc(1, tc->public_key_size);
+	if (!public_key) {
+		EMSG("Public key memory allocation failed");
+		res = TEE_ERROR_OUT_OF_MEMORY;
+		goto out;
+	}
+
+	res = imx_ele_generate_key(key_mgmt_handle, tc->public_key_size,
+				   tc->key_group, tc->sync, false,
+				   tc->key_lifetime, PTA_ELE_ECC_KEY_USAGE,
+				   tc->key_type, tc->key_size,
+				   tc->permitted_algorithm,
+				   ELE_KEY_LIFECYCLE_DEVICE, public_key,
+				   &key_identifier);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key generation failed");
+		goto out;
+	}
+
+	res = ele_sign_verify(session_handle, key_store_handle, key_identifier,
+			      public_key, tc->public_key_size, tc->key_size,
+			      ELE_KEY_TYPE_ECC_PUB_KEY_SECP_R1,
+			      tc->permitted_algorithm);
+	if (res != TEE_SUCCESS) {
+		EMSG("Sign Verify test failed");
+		error = 1;
+	}
+
+	res = imx_ele_delete_key(key_mgmt_handle, key_identifier, tc->sync,
+				 false);
+	if (res != TEE_SUCCESS)
+		EMSG("Key deletion failed");
+
+out:
+	free(public_key);
+	if (error && !res)
+		res = TEE_ERROR_GENERIC;
+	return res;
+}
+
+static TEE_Result
+pta_ele_test_sign_verify(uint32_t param_types,
+			 TEE_Param params[TEE_NUM_PARAMS] __unused)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t session_handle = 0;
+	uint32_t key_store_handle = 0;
+	uint32_t key_mgmt_handle = 0;
+	unsigned int i = 0;
+	unsigned int error = 0;
+
+	uint32_t exp_param_types =
+		TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE, TEE_PARAM_TYPE_NONE,
+				TEE_PARAM_TYPE_NONE, TEE_PARAM_TYPE_NONE);
+
+	if (param_types != exp_param_types)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = imx_ele_session_open(&session_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Session open failed");
+		goto out;
+	}
+
+	res = get_key_store_handle(session_handle, &key_store_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key store open failed");
+		goto session_close;
+	}
+
+	res = imx_ele_key_mgmt_open(key_store_handle, &key_mgmt_handle);
+	if (res != TEE_SUCCESS) {
+		EMSG("Key management open failed");
+		goto key_store_close;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(gen_key_tc); i++) {
+		res = ele_gen_del_sign_verify(&gen_key_tc[i], key_mgmt_handle,
+					      session_handle, key_store_handle);
+		if (res != TEE_SUCCESS) {
+			EMSG("ELE Gen delete with sign/verify failed");
+			error = 1;
+			break;
+		}
+	}
+
+	res = imx_ele_key_mgmt_close(key_mgmt_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Key Mgmt Close failed");
+
+key_store_close:
+	res = imx_ele_key_store_close(key_store_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Key Store Close failed");
+
+session_close:
+	res = imx_ele_session_close(session_handle);
+	if (res != TEE_SUCCESS)
+		EMSG("Session Close failed");
+
+out:
+	if (error)
+		res = TEE_ERROR_GENERIC;
+	return res;
+}
+
+static TEE_Result pta_ele_test_invoke_cmd(void *sess_ctx __unused,
+					  uint32_t cmd_id, uint32_t param_types,
+					  TEE_Param params[TEE_NUM_PARAMS])
+{
+	switch (cmd_id) {
+	case PTA_ELE_CMD_TEST_KEY_GENERATE_DELETE:
+		return pta_ele_test_key_generate_delete(param_types, params);
+	case PTA_ELE_CMD_TEST_SIGN_VERIFY:
+		return pta_ele_test_sign_verify(param_types, params);
+	default:
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+}
+
+pseudo_ta_register(.uuid = PTA_ELE_TEST_UUID, .name = PTA_NAME,
+		   .flags = PTA_DEFAULT_FLAGS,
+		   .invoke_command_entry_point = pta_ele_test_invoke_cmd);
diff --git a/core/pta/imx/ocotp.c b/core/pta/imx/ocotp.c
index 97dc4df3e..ee895b95d 100644
--- a/core/pta/imx/ocotp.c
+++ b/core/pta/imx/ocotp.c
@@ -1,8 +1,14 @@
 // SPDX-License-Identifier: BSD-2-Clause
 /*
- * Copyright 2021 NXP
+ * Copyright 2021, 2023 NXP
  */
+
+#ifdef CFG_IMX_ELE
+#include <drivers/ele_extension.h>
+#endif
+#ifdef CFG_IMX_OCOTP
 #include <drivers/imx_ocotp.h>
+#endif
 #include <kernel/pseudo_ta.h>
 #include <kernel/tee_common_otp.h>
 #include <pta_imx_ocotp.h>
diff --git a/core/pta/imx/sub.mk b/core/pta/imx/sub.mk
index 6209e97e7..856216bdd 100644
--- a/core/pta/imx/sub.mk
+++ b/core/pta/imx/sub.mk
@@ -1,4 +1,6 @@
 srcs-$(CFG_IMX_DIGPROG) += digprog.c
-srcs-$(CFG_IMX_OCOTP) += ocotp.c
+srcs-$(call cfg-one-enabled,CFG_IMX_OCOTP CFG_IMX_ELE) += ocotp.c
 srcs-$(CFG_NXP_CAAM_MP_DRV) += manufacturing_protection.c
 srcs-$(CFG_NXP_CAAM_DEK_DRV) += dek_blob.c
+srcs-$(CFG_IMX_TRUSTED_ARM_CE) += trusted_arm_ce.c
+srcs-$(CFG_IMX_ELE_TEST_PTA) += ele_test.c
diff --git a/core/pta/imx/trusted_arm_ce.c b/core/pta/imx/trusted_arm_ce.c
new file mode 100644
index 000000000..359734759
--- /dev/null
+++ b/core/pta/imx/trusted_arm_ce.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright 2023 NXP
+ */
+#include <assert.h>
+#include <crypto/crypto_accel.h>
+#include <drivers/ele_extension.h>
+#include <kernel/huk_subkey.h>
+#include <kernel/pseudo_ta.h>
+#include <kernel/user_mode_ctx.h>
+#if defined(CFG_WITH_VFP)
+#include <kernel/vfp.h>
+#endif
+#include <mm/core_memprot.h>
+#include <mm/core_mmu.h>
+#include <mm/vm.h>
+#include <pta_imx_trusted_arm_ce.h>
+#include <stdint.h>
+#include <string.h>
+#include <string_ext.h>
+
+#ifndef CFG_CORE_RESERVED_SHM
+#error "CFG_CORE_RESERVED_SHM is required"
+#endif
+#ifndef CFG_CORE_DYN_SHM
+#error "CFG_CORE_DYN_SHM is required"
+#endif
+
+#define TRUSTED_ARM_CE_PTA_NAME "trusted_arm_ce.pta"
+
+#define AES_KEYSIZE_128 16U
+#define AES_KEYSIZE_256 32U
+
+/*
+ * Maximum expanded key size is 15 * 16 bytes (maximum number of rounds *
+ * size of an AES block), i.e. 60 32-bit words.
+ * Please refer to crypto_accel_aes_expand_keys().
+ */
+#define EXPANDED_KEY_SIZE 60
+
+#define SHM_CACHE_ATTRS                                                    \
+	((uint32_t)(core_mmu_is_shm_cached() ? TEE_MATTR_MEM_TYPE_CACHED : \
+					       TEE_MATTR_MEM_TYPE_DEV))
+
+#if defined(CFG_MX93)
+#define OCRAM_START 0x20518000
+#define OCRAM_END 0x2051C000
+#elif defined(CFG_MX95)
+#define OCRAM_START 0x204BC000
+#define OCRAM_END 0x204C0000
+#else
+#error "Platform not supported"
+#endif
+
+static_assert(OCRAM_END > OCRAM_START);
+
+#define OCRAM_SIZE (OCRAM_END - OCRAM_START)
+
+/* Maximum number of keys in memory */
+#define MAX_NUMBER_KEYS (OCRAM_SIZE / sizeof(struct symmetric_key))
+
+/*
+ * This structure must be 64-byte aligned because imx_ele_derive_key()
+ * requires a cache-line-aligned key buffer.
+ */
+struct symmetric_key {
+	uint8_t key_buffer[TEE_AES_BLOCK_SIZE];
+	uint32_t enc_key[EXPANDED_KEY_SIZE];
+	uint32_t dec_key[EXPANDED_KEY_SIZE];
+	size_t key_size;
+	uint32_t key_id;
+	uint32_t round_count;
+	bool allocated;
+} __aligned(64);
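+
+/*
+ * key_buffer receives the raw key derived by imx_ele_derive_key(), enc_key
+ * and dec_key hold the round-key schedules produced by
+ * crypto_accel_aes_expand_keys(), round_count is passed to the cipher
+ * routines, and allocated/key_id track ownership of the slot in key_storage.
+ */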
+
+/* Physical Secure OCRAM pool */
+static tee_mm_pool_t tee_mm_sec_ocram;
+static tee_mm_pool_t tee_mm_nsec_shm;
+static void *sec_ocram_base;
+
+static struct symmetric_key *key_storage;
+static struct mutex key_storage_mutex = MUTEX_INITIALIZER;
+
+/*
+ * Get symmetric key with given key identifier
+ *
+ * [in]	key_id	Symmetric key identifier
+ *
+ * return a symmetric_key structure pointer matching key_id
+ * or NULL if not found
+ */
+static struct symmetric_key *get_client_key(uint32_t key_id)
+{
+	if (key_storage) {
+		unsigned int key_idx = 0;
+
+		for (key_idx = 0; key_idx < MAX_NUMBER_KEYS; key_idx++) {
+			struct symmetric_key *entry = &key_storage[key_idx];
+
+			if (entry->key_id == key_id)
+				return entry;
+		}
+	}
+
+	return NULL;
+}
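+
+/*
+ * Note: get_client_key() itself does not take key_storage_mutex;
+ * add_client_key() and remove_client_key() below serialize their updates
+ * of the key table with it.
+ */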
+
+/*
+ * Get symmetric key with given key identifier or add it if not found
+ *
+ * [in]	key_id	Symmetric key identifier
+ *
+ * return a symmetric_key structure pointer
+ */
+static struct symmetric_key *add_client_key(uint32_t key_id)
+{
+	struct symmetric_key *entry = NULL;
+
+	mutex_lock(&key_storage_mutex);
+
+	entry = get_client_key(key_id);
+	if (!entry && key_storage) {
+		unsigned int key_idx = 0;
+
+		for (key_idx = 0; key_idx < MAX_NUMBER_KEYS; key_idx++) {
+			if (!key_storage[key_idx].allocated) {
+				entry = &key_storage[key_idx];
+				entry->allocated = true;
+				entry->key_id = key_id;
+				break;
+			}
+		}
+	}
+	mutex_unlock(&key_storage_mutex);
+
+	return entry;
+}
+
+/*
+ * Remove a secret key according to key id
+ *
+ * [in]	key_id	Symmetric key identifier
+ */
+static TEE_Result remove_client_key(uint32_t key_id)
+{
+	struct symmetric_key *entry = NULL;
+
+	mutex_lock(&key_storage_mutex);
+
+	entry = get_client_key(key_id);
+	if (entry)
+		memzero_explicit(entry, sizeof(struct symmetric_key));
+
+	mutex_unlock(&key_storage_mutex);
+
+	return TEE_SUCCESS;
+}
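+
+/*
+ * remove_client_key() zeroizes the whole entry, which also clears the
+ * allocated flag and key_id, so the slot becomes available again for
+ * add_client_key().
+ */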
+
+/*
+ * Get static shared memory buffer virtual address from physical address
+ *
+ * [in] pa	buffer physical address
+ * [in] size	buffer size
+ *
+ * return buffer virtual address or NULL if no match found
+ */
+static inline void *nsec_shm_phys_to_virt(paddr_t pa, size_t size)
+{
+	return phys_to_virt(pa, MEM_AREA_NSEC_SHM, size);
+}
+
+/*
+ * Deserialize physical memory address and size
+ *
+ * [in]     buffer	serialized data buffer
+ * [in/out] size	serialized data size
+ * [out]    pa		physical address
+ * [out]    sz		size
+ *
+ * return updated serialized data buffer address
+ */
+static uint8_t *deserialize_memref(uint8_t *buffer, size_t *size, paddr_t *pa,
+				   size_t *sz)
+{
+	if (!buffer)
+		goto out;
+
+	if (*size < sizeof(paddr_t))
+		goto out;
+	memcpy(pa, buffer, sizeof(paddr_t));
+	buffer += sizeof(paddr_t);
+	*size -= sizeof(paddr_t);
+
+	if (*size < sizeof(size_t))
+		goto out;
+	memcpy(sz, buffer, sizeof(size_t));
+	buffer += sizeof(size_t);
+	*size -= sizeof(size_t);
+out:
+	return buffer;
+}
+
+/*
+ * Parse TEE parameters
+ *
+ * [in]    param_types	command param type
+ * [in]    params	command parameters
+ * [out]   key_id_1	AES key id 1
+ * [out]   key_id_2	AES key id 2 [optional]
+ * [out]   iv		iv physical address
+ * [out]   ivlen	iv size
+ * [out]   srcdata	source physical address
+ * [out]   srclen	source size
+ * [out]   dstdata	destination physical address
+ * [out]   dstlen	destination size
+ */
+static TEE_Result parse_params(uint32_t param_types,
+			       TEE_Param params[TEE_NUM_PARAMS],
+			       uint32_t *key_id_1, uint32_t *key_id_2,
+			       paddr_t *iv, size_t *ivlen, paddr_t *srcdata,
+			       size_t *srclen, paddr_t *dstdata, size_t *dstlen)
+{
+	uint8_t *buffer = NULL;
+	size_t buffer_size = 0;
+	uint32_t exp_param_types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
+						   TEE_PARAM_TYPE_VALUE_INPUT,
+						   TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE);
+
+	if (param_types != exp_param_types)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	buffer = params[0].memref.buffer;
+	buffer_size = params[0].memref.size;
+
+	buffer = deserialize_memref(buffer, &buffer_size, srcdata, srclen);
+	buffer = deserialize_memref(buffer, &buffer_size, dstdata, dstlen);
+	buffer = deserialize_memref(buffer, &buffer_size, iv, ivlen);
+
+	*key_id_1 = params[1].value.a;
+	if (key_id_2)
+		*key_id_2 = params[1].value.b;
+
+	return TEE_SUCCESS;
+}
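+
+/*
+ * Illustrative only: a client filling the params[0] memref consumed by
+ * parse_params() is expected to pack the fields back to back in this
+ * order (variable names below are hypothetical):
+ *
+ *	memcpy(p, &src_pa, sizeof(paddr_t));	p += sizeof(paddr_t);
+ *	memcpy(p, &src_len, sizeof(size_t));	p += sizeof(size_t);
+ *	memcpy(p, &dst_pa, sizeof(paddr_t));	p += sizeof(paddr_t);
+ *	memcpy(p, &dst_len, sizeof(size_t));	p += sizeof(size_t);
+ *	memcpy(p, &iv_pa, sizeof(paddr_t));	p += sizeof(paddr_t);
+ *	memcpy(p, &iv_len, sizeof(size_t));
+ *
+ * The paddr_t/size_t layout must match between the caller and the core.
+ */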
+
+TEE_Result cipher_cbc(uint32_t key_id, paddr_t iv, paddr_t srcdata,
+		      size_t srclen, paddr_t dstdata, size_t dstlen,
+		      bool encrypt)
+{
+	struct symmetric_key *key = NULL;
+	uint8_t *nonce = NULL;
+	uint8_t *src = NULL;
+	uint8_t *dst = NULL;
+
+	if (srclen % TEE_AES_BLOCK_SIZE || dstlen % TEE_AES_BLOCK_SIZE)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	nonce = nsec_shm_phys_to_virt(iv, TEE_AES_BLOCK_SIZE);
+	src = nsec_shm_phys_to_virt(srcdata, srclen);
+	dst = nsec_shm_phys_to_virt(dstdata, dstlen);
+	if (!nonce || !src || !dst)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	key = get_client_key(key_id);
+	if (!key)
+		return TEE_ERROR_ITEM_NOT_FOUND;
+
+	if (encrypt) {
+		/* Encrypt srcdata into dstdata */
+		if (vfp_is_enabled())
+			ce_aes_cbc_encrypt(dst, src, (uint8_t *)key->enc_key,
+					   key->round_count,
+					   srclen / TEE_AES_BLOCK_SIZE, nonce);
+		else
+			crypto_accel_aes_cbc_enc(dst, src,
+						 (uint8_t *)key->enc_key,
+						 key->round_count,
+						 srclen / TEE_AES_BLOCK_SIZE,
+						 nonce);
+	} else {
+		/* Decrypt srcdata into dstdata */
+		if (vfp_is_enabled())
+			ce_aes_cbc_decrypt(dst, src, (uint8_t *)key->dec_key,
+					   key->round_count,
+					   srclen / TEE_AES_BLOCK_SIZE, nonce);
+		else
+			crypto_accel_aes_cbc_dec(dst, src,
+						 (uint8_t *)key->dec_key,
+						 key->round_count,
+						 srclen / TEE_AES_BLOCK_SIZE,
+						 nonce);
+	}
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Do AES CBC Cipher operation
+ *
+ * [in]    param_types	command param type
+ * [in]    params	command parameters
+ * [in]    encrypt	True for encryption, false otherwise
+ */
+static TEE_Result pta_cipher_cbc(uint32_t param_types,
+				 TEE_Param params[TEE_NUM_PARAMS], bool encrypt)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	paddr_t iv = 0;
+	size_t ivlen = 0;
+	paddr_t srcdata = 0;
+	paddr_t destdata = 0;
+	size_t srclen = 0;
+	size_t destlen = 0;
+	uint32_t key_id = 0;
+
+	res = parse_params(param_types, params, &key_id, NULL, &iv, &ivlen,
+			   &srcdata, &srclen, &destdata, &destlen);
+	if (res)
+		return res;
+
+	return cipher_cbc(key_id, iv, srcdata, srclen, destdata, destlen,
+			  encrypt);
+}
+
+TEE_Result cipher_xts(uint32_t key_id_1, uint32_t key_id_2, paddr_t iv,
+		      paddr_t srcdata, size_t srclen, paddr_t dstdata,
+		      size_t dstlen, bool encrypt)
+{
+	struct symmetric_key *key1 = NULL;
+	struct symmetric_key *key2 = NULL;
+	uint8_t *nonce = NULL;
+	uint8_t *src = NULL;
+	uint8_t *dst = NULL;
+
+	if (srclen % TEE_AES_BLOCK_SIZE || dstlen % TEE_AES_BLOCK_SIZE)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	nonce = nsec_shm_phys_to_virt(iv, TEE_AES_BLOCK_SIZE);
+	src = nsec_shm_phys_to_virt(srcdata, srclen);
+	dst = nsec_shm_phys_to_virt(dstdata, dstlen);
+
+	if (!nonce || !src || !dst)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	key1 = get_client_key(key_id_1);
+	if (!key1)
+		return TEE_ERROR_ITEM_NOT_FOUND;
+
+	key2 = get_client_key(key_id_2);
+	if (!key2)
+		return TEE_ERROR_ITEM_NOT_FOUND;
+
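+	/*
+	 * Note: the XTS tweak key (key2) is always used with its encryption
+	 * key schedule in both the encrypt and decrypt paths, since the
+	 * tweak is computed by encrypting the IV.
+	 */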
+	if (encrypt) {
+		/* Encrypt srcdata into dstdata */
+		if (vfp_is_enabled())
+			ce_aes_xts_encrypt(dst, src, (uint8_t *)key1->enc_key,
+					   key1->round_count,
+					   srclen / TEE_AES_BLOCK_SIZE,
+					   (uint8_t *)key2->enc_key, nonce);
+		else
+			crypto_accel_aes_xts_enc(dst, src,
+						 (uint8_t *)key1->enc_key,
+						 key1->round_count,
+						 srclen / TEE_AES_BLOCK_SIZE,
+						 (uint8_t *)key2->enc_key,
+						 nonce);
+	} else {
+		/* Decrypt srcdata into dstdata */
+		if (vfp_is_enabled())
+			ce_aes_xts_decrypt(dst, src, (uint8_t *)key1->dec_key,
+					   key1->round_count,
+					   srclen / TEE_AES_BLOCK_SIZE,
+					   (uint8_t *)key2->enc_key, nonce);
+		else
+			crypto_accel_aes_xts_dec(dst, src,
+						 (uint8_t *)key1->dec_key,
+						 key1->round_count,
+						 srclen / TEE_AES_BLOCK_SIZE,
+						 (uint8_t *)key2->enc_key,
+						 nonce);
+	}
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Do AES XTS Cipher operation
+ *
+ * [in]    param_types	command param type
+ * [in]    params	command parameters
+ * [in]    encrypt	True for encryption, false otherwise
+ */
+static TEE_Result pta_cipher_xts(uint32_t param_types,
+				 TEE_Param params[TEE_NUM_PARAMS], bool encrypt)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	paddr_t iv = 0;
+	size_t ivlen = 0;
+	paddr_t srcdata = 0;
+	paddr_t destdata = 0;
+	size_t srclen = 0;
+	size_t destlen = 0;
+	uint32_t key_id_1 = 0;
+	uint32_t key_id_2 = 0;
+
+	res = parse_params(param_types, params, &key_id_1, &key_id_2, &iv,
+			   &ivlen, &srcdata, &srclen, &destdata, &destlen);
+	if (res)
+		return res;
+
+	return cipher_xts(key_id_1, key_id_2, iv, srcdata, srclen, destdata,
+			  destlen, encrypt);
+}
+
+/*
+ * Return TEE_SUCCESS if the key size is supported,
+ * TEE_ERROR_BAD_PARAMETERS otherwise
+ *
+ * [in]    key_size	key size in bytes
+ */
+static TEE_Result is_key_size_supported(size_t key_size)
+{
+	switch (key_size) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	default:
+		return TEE_ERROR_BAD_PARAMETERS;
+	}
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Add or update a secret key
+ *
+ * [in]    key_id	AES key id
+ * [in]    salt		salt used for key generation
+ * [in]    length	size of the input salt
+ */
+static TEE_Result set_key(uint32_t key_id, uint8_t *salt, size_t length)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct symmetric_key *key = NULL;
+	void *keybuffer = NULL;
+	void *enc_key = NULL;
+	void *dec_key = NULL;
+	uint32_t *round_count = NULL;
+
+	if (!key_id)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	key = add_client_key(key_id);
+	if (!key)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	keybuffer = key->key_buffer;
+	enc_key = key->enc_key;
+	dec_key = key->dec_key;
+	round_count = &key->round_count;
+
+	res = imx_ele_derive_key(salt, length, keybuffer, length);
+	if (res)
+		goto out;
+
+	res = crypto_accel_aes_expand_keys(keybuffer, length, enc_key, dec_key,
+					   sizeof(key->enc_key), round_count);
+	if (res)
+		goto out;
+
+	key->key_size = length;
+
+	return TEE_SUCCESS;
+out:
+	memzero_explicit(key, sizeof(struct symmetric_key));
+
+	return res;
+}
+
+/*
+ * Add or update a secret key
+ *
+ * [in]    nCommandID	PTA_SET_XTS_KEY or PTA_SET_CBC_KEY
+ * [in]    param_types	command param type
+ * [in]    params	command parameters
+ */
+static TEE_Result pta_set_key(uint32_t nCommandID, uint32_t param_types,
+			      TEE_Param params[TEE_NUM_PARAMS])
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint8_t *salt = NULL;
+	size_t salt_length = 0;
+	uint32_t key_id_1 = 0;
+	uint32_t key_id_2 = 0;
+
+	uint32_t exp_param_types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
+						   TEE_PARAM_TYPE_VALUE_INPUT,
+						   TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE);
+
+	if (param_types != exp_param_types)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	salt = params[0].memref.buffer;
+	salt_length = params[0].memref.size;
+	key_id_1 = params[1].value.a;
+	key_id_2 = params[1].value.b;
+
+	if (!salt || !salt_length)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (nCommandID == PTA_SET_XTS_KEY) {
+		/* The salt buffer carries derivation material for two keys */
+		if (salt_length % 2)
+			return TEE_ERROR_BAD_PARAMETERS;
+
+		salt_length = salt_length / 2;
+	}
+
+	/* The per-key salt length equals the key length */
+	res = is_key_size_supported(salt_length);
+	if (res)
+		return res;
+
+	res = set_key(key_id_1, salt, salt_length);
+	if (res)
+		return res;
+
+	if (nCommandID == PTA_SET_XTS_KEY)
+		res = set_key(key_id_2, salt + salt_length, salt_length);
+
+	return res;
+}
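+
+/*
+ * Worked example for PTA_SET_XTS_KEY: a 64-byte salt is split in half,
+ * bytes 0..31 derive the key identified by param[1].value.a and bytes
+ * 32..63 derive the key identified by param[1].value.b, giving two
+ * AES-256 keys for the XTS data/tweak pair.
+ */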
+
+/*
+ * Remove secret keys according to key id
+ *
+ * [in]    param_types	command param type
+ * [in]    params	command parameters
+ */
+static TEE_Result pta_remove_key(uint32_t param_types,
+				 TEE_Param params[TEE_NUM_PARAMS])
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	uint32_t key_id_1 = 0;
+	uint32_t key_id_2 = 0;
+
+	uint32_t exp_param_types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+						   TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE);
+
+	if (param_types != exp_param_types)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	key_id_1 = params[0].value.a;
+	key_id_2 = params[0].value.b;
+
+	res = remove_client_key(key_id_1);
+	if (res)
+		return res;
+
+	if (key_id_2)
+		res = remove_client_key(key_id_2);
+
+	return res;
+}
+
+/*
+ * Allocate a buffer in the OCRAM heap
+ *
+ * [out]   va		allocated buffer address
+ * [in]    alloc_size	allocation size
+ */
+static TEE_Result ocram_allocate(vaddr_t *va, size_t alloc_size)
+{
+	tee_mm_entry_t *mm = NULL;
+	size_t size = OCRAM_SIZE;
+
+	if (!alloc_size)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (alloc_size > size)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	mm = tee_mm_alloc(&tee_mm_sec_ocram, alloc_size);
+	if (!mm)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	*va = (vaddr_t)phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_RAM_SEC,
+				    alloc_size);
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Free a buffer from the OCRAM heap
+ *
+ * [in] va		allocated buffer address
+ */
+static TEE_Result ocram_free(vaddr_t va)
+{
+	tee_mm_entry_t *mm = NULL;
+	paddr_t pa = 0;
+
+	if (!va)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	pa = virt_to_phys((void *)va);
+
+	mm = tee_mm_find(&tee_mm_sec_ocram, pa);
+	if (!mm)
+		return TEE_ERROR_ITEM_NOT_FOUND;
+
+	tee_mm_free(mm);
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Allocate a static shared memory
+ *
+ * [in]    param_types	command param type
+ * [in]    params	command parameters
+ */
+static TEE_Result pta_shm_allocate(uint32_t param_types,
+				   TEE_Param params[TEE_NUM_PARAMS])
+{
+	tee_mm_entry_t *mm = NULL;
+	size_t alloc_size = 0;
+	paddr_t pa = 0;
+
+	uint32_t exp_param_types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
+						   TEE_PARAM_TYPE_VALUE_OUTPUT,
+						   TEE_PARAM_TYPE_NONE,
+						   TEE_PARAM_TYPE_NONE);
+	if (param_types != exp_param_types)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	alloc_size = reg_pair_to_64(params[0].value.a, params[0].value.b);
+	if (!alloc_size)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	mm = tee_mm_alloc(&tee_mm_nsec_shm, alloc_size);
+	if (!mm)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	pa = tee_mm_get_smem(mm);
+
+	reg_pair_from_64(pa, &params[1].value.a, &params[1].value.b);
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Free a static shared memory
+ *
+ * [in]    param_types	command param type
+ * [in]    params	command parameters
+ */
+static TEE_Result pta_shm_free(uint32_t param_types,
+			       TEE_Param params[TEE_NUM_PARAMS])
+{
+	tee_mm_entry_t *mm = NULL;
+	paddr_t pa = 0;
+
+	uint32_t exp_param_types =
+		TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT, TEE_PARAM_TYPE_NONE,
+				TEE_PARAM_TYPE_NONE, TEE_PARAM_TYPE_NONE);
+
+	if (param_types != exp_param_types)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	pa = reg_pair_to_64(params[0].value.a, params[0].value.b);
+	if (!pa)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	mm = tee_mm_find(&tee_mm_nsec_shm, pa);
+	if (!mm)
+		return TEE_ERROR_ITEM_NOT_FOUND;
+
+	tee_mm_free(mm);
+
+	return TEE_SUCCESS;
+}
+
+/*
+ * Called when a pseudo TA instance is created.
+ */
+static TEE_Result trusted_arm_ce_create(void)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	paddr_t ps = OCRAM_START;
+	size_t size = OCRAM_SIZE;
+	vaddr_t va = 0;
+
+	sec_ocram_base = core_mmu_add_mapping(MEM_AREA_RAM_SEC, ps, size);
+	if (!sec_ocram_base)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	memzero_explicit(sec_ocram_base, size);
+
+	if (!tee_mm_init(&tee_mm_sec_ocram, ps, size, CORE_MMU_USER_CODE_SHIFT,
+			 TEE_MM_POOL_NO_FLAGS)) {
+		res = TEE_ERROR_OUT_OF_MEMORY;
+		goto out;
+	}
+
+	res = ocram_allocate(&va, ROUNDUP(MAX_NUMBER_KEYS *
+			     sizeof(struct symmetric_key), SIZE_4K));
+	if (res)
+		goto out;
+
+	key_storage = (struct symmetric_key *)va;
+
+	/*
+	 * Add the tee_mm_nsec_shm memory pool on top of the static SHM area.
+	 * Doing so reserves it for the PTA SHM allocations, since the area
+	 * is not used by Linux when dynamic SHM is enabled.
+	 */
+	if (!tee_mm_init(&tee_mm_nsec_shm, default_nsec_shm_paddr,
+			 default_nsec_shm_size, CORE_MMU_USER_CODE_SHIFT,
+			 TEE_MM_POOL_NO_FLAGS)) {
+		res = TEE_ERROR_OUT_OF_MEMORY;
+		goto out;
+	}
+
+	return TEE_SUCCESS;
+out:
+	if (key_storage)
+		ocram_free((vaddr_t)key_storage);
+	tee_mm_final(&tee_mm_sec_ocram);
+	core_mmu_remove_mapping(MEM_AREA_RAM_SEC, sec_ocram_base, size);
+	return res;
+}
+
+/*
+ * Called when a pseudo TA instance is destroyed.
+ */
+static void trusted_arm_ce_destroy(void)
+{
+	size_t size = ROUNDUP(MAX_NUMBER_KEYS * sizeof(struct symmetric_key),
+			      SIZE_4K);
+
+	if (key_storage) {
+		memzero_explicit(key_storage, size);
+
+		ocram_free((vaddr_t)key_storage);
+	}
+
+	tee_mm_final(&tee_mm_sec_ocram);
+	tee_mm_final(&tee_mm_nsec_shm);
+
+	size = OCRAM_SIZE;
+	memzero_explicit(sec_ocram_base, size);
+
+	core_mmu_remove_mapping(MEM_AREA_RAM_SEC, sec_ocram_base, size);
+}
+
+/*
+ * Called when this pseudo TA is invoked.
+ *
+ * sess_ctx    Session Identifier
+ * cmd_id      Command ID
+ * param_types Parameter types
+ * prms        Buffer or value parameters
+ */
+static TEE_Result trusted_arm_ce_invoke_command(void *sess_ctx __unused,
+						uint32_t cmd_id,
+						uint32_t param_types,
+						TEE_Param prms[TEE_NUM_PARAMS])
+{
+	switch (cmd_id) {
+	case PTA_SHM_ALLOCATE:
+		return pta_shm_allocate(param_types, prms);
+	case PTA_SHM_FREE:
+		return pta_shm_free(param_types, prms);
+	case PTA_SET_XTS_KEY:
+	case PTA_SET_CBC_KEY:
+		return pta_set_key(cmd_id, param_types, prms);
+	case PTA_REMOVE_KEY:
+		return pta_remove_key(param_types, prms);
+	case PTA_ENCRYPT_CBC:
+		return pta_cipher_cbc(param_types, prms, true);
+	case PTA_DECRYPT_CBC:
+		return pta_cipher_cbc(param_types, prms, false);
+	case PTA_ENCRYPT_XTS:
+		return pta_cipher_xts(param_types, prms, true);
+	case PTA_DECRYPT_XTS:
+		return pta_cipher_xts(param_types, prms, false);
+	default:
+		break;
+	}
+
+	return TEE_ERROR_BAD_PARAMETERS;
+}
+
+pseudo_ta_register(.uuid = PTA_TRUSTED_ARM_CE_UUID,
+		   .name = TRUSTED_ARM_CE_PTA_NAME,
+		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
+		   .create_entry_point = trusted_arm_ce_create,
+		   .destroy_entry_point = trusted_arm_ce_destroy,
+		   .invoke_command_entry_point = trusted_arm_ce_invoke_command);
diff --git a/core/tee/entry_std.c b/core/tee/entry_std.c
index 607ebd4b5..7d4339ed6 100644
--- a/core/tee/entry_std.c
+++ b/core/tee/entry_std.c
@@ -582,6 +582,18 @@ err:
 	return res;
 }
 
+bool tee_entry_is_sdp_cached(void)
+{
+#ifdef CFG_SECURE_DATA_PATH
+	uint32_t cattr = 0;
+
+	if (!mobj_get_mem_type(*sdp_mem_mobjs, &cattr))
+		return cattr & TEE_MATTR_MEM_TYPE_CACHED;
+#endif
+
+	return false;
+}
+
 static TEE_Result default_mobj_init(void)
 {
 #ifdef CFG_CORE_RESERVED_SHM
diff --git a/lib/libutee/include/pta_ele_test.h b/lib/libutee/include/pta_ele_test.h
new file mode 100644
index 000000000..7a543f133
--- /dev/null
+++ b/lib/libutee/include/pta_ele_test.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+#ifndef PTA_ELE_TEST_H
+#define PTA_ELE_TEST_H
+
+#define PTA_ELE_TEST_UUID                                              \
+	{                                                              \
+		0x6bd8ac83, 0x592e, 0x4c81,                            \
+		{                                                      \
+			0x8a, 0xb3, 0x4a, 0x2c, 0x30, 0xc9, 0xf6, 0x27 \
+		}                                                      \
+	}
+
+/*
+ * Test Generation/Deletion of Persistent and volatile keys.
+ */
+#define PTA_ELE_CMD_TEST_KEY_GENERATE_DELETE 0
+
+/*
+ * Test Signing and verification operation with keys.
+ */
+#define PTA_ELE_CMD_TEST_SIGN_VERIFY 1
+
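+/*
+ * Illustrative client-side sketch (assumes invocation from a normal-world
+ * client through the GlobalPlatform Client API; not part of this header):
+ * both commands take no parameters.
+ *
+ *	TEEC_Operation op = { };
+ *
+ *	op.paramTypes = TEEC_PARAM_TYPES(TEEC_NONE, TEEC_NONE,
+ *					 TEEC_NONE, TEEC_NONE);
+ *	res = TEEC_InvokeCommand(&session, PTA_ELE_CMD_TEST_SIGN_VERIFY,
+ *				 &op, &err_origin);
+ */
+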
+#endif /* PTA_ELE_TEST_H */
diff --git a/lib/libutee/include/pta_i2c_rtc_test.h b/lib/libutee/include/pta_i2c_rtc_test.h
new file mode 100644
index 000000000..1f05446c0
--- /dev/null
+++ b/lib/libutee/include/pta_i2c_rtc_test.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2021 NXP
+ *
+ * brief   PTA I2C RTC Test interface identification.
+ */
+#ifndef __PTA_I2C_RTC_TEST_H__
+#define __PTA_I2C_RTC_TEST_H__
+
+/* PTA UUID generated at http://www.itu.int/ITU-T/asn1/uuid.html */
+#define PTA_LS_I2C_RTC_TEST_SUITE_UUID \
+	{ \
+		0x4daa5ac7, 0xe9d2, 0x498f, \
+		{ \
+			0xa2, 0x4a, 0x4b, 0x2e, 0xab, 0x7b, 0x4b, 0x01 \
+		} \
+	}
+
+/*
+ * Commands Definition
+ */
+/* Get RTC time connected to I2C */
+#define PTA_CMD_I2C_RTC_RUN_TEST_SUITE 0
+
+#endif /* __PTA_I2C_RTC_TEST_H__ */
diff --git a/lib/libutee/include/pta_imx_trusted_arm_ce.h b/lib/libutee/include/pta_imx_trusted_arm_ce.h
new file mode 100644
index 000000000..5e5db4273
--- /dev/null
+++ b/lib/libutee/include/pta_imx_trusted_arm_ce.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2023 NXP
+ */
+#ifndef __PTA_IMX_TRUSTED_ARM_CE_H__
+#define __PTA_IMX_TRUSTED_ARM_CE_H__
+
+#include <tee_api_types.h>
+
+#define PTA_TRUSTED_ARM_CE_UUID                                        \
+	{                                                              \
+		0x560c5231, 0x71bc, 0x476d,                            \
+		{                                                      \
+			0x8c, 0x2e, 0x4b, 0xa1, 0x07, 0x99, 0x1e, 0x72 \
+		}                                                      \
+	}
+
+/*
+ * Set AES CBC symmetric key
+ *
+ * [in]     param[0].memref        Salt used to derive the key
+ * [in]     param[1].value.a       Key id
+ */
+#define PTA_SET_CBC_KEY 0
+
+/*
+ * Set AES XTS symmetric keys
+ *
+ * [in]     param[0].memref        Salt used to derive the keys (two halves)
+ * [in]     param[1].value.a       Key id 1
+ * [in]     param[1].value.b       Key id 2
+ */
+#define PTA_SET_XTS_KEY 1
+
+/*
+ * Remove secret keys according to key id
+ *
+ * [in]     param[0].value.a       Key id 1
+ * [in]     param[0].value.b       Key id 2
+ */
+#define PTA_REMOVE_KEY  2
+
+/*
+ * Do AES CBC Encryption
+ *
+ * [in]     param[0].memref        Parameters buffer
+ * [in]     param[1].value.a       Key id
+ */
+#define PTA_ENCRYPT_CBC 3
+
+/*
+ * Do AES CBC Decryption
+ *
+ * [in]     param[0].memref        Parameters buffer
+ * [in]     param[1].value.a       Key id
+ */
+#define PTA_DECRYPT_CBC 4
+
+/*
+ * Do AES XTS Encryption
+ *
+ * [in]     param[0].memref        Parameters buffer
+ * [in]     param[1].value.a       Key id 1
+ * [in]     param[1].value.b       Key id 2
+ */
+#define PTA_ENCRYPT_XTS 5
+
+/*
+ * Do AES XTS Decryption
+ *
+ * [in]     param[0].memref        Parameters buffer
+ * [in]     param[1].value.a       Key id 1
+ * [in]     param[1].value.b       Key id 2
+ */
+#define PTA_DECRYPT_XTS 6
+
+/*
+ * Allocate a static shared memory buffer
+ *
+ * [in]     param[0].value.a       MSB Buffer size
+ * [in]     param[0].value.b       LSB Buffer size
+ * [out]    param[1].value.a       MSB Buffer physical address
+ * [out]    param[1].value.b       LSB Buffer physical address
+ */
+#define PTA_SHM_ALLOCATE 7
+
+/*
+ * Free a static shared memory buffer
+ *
+ * [in]    param[0].value.a       MSB Buffer physical address
+ * [in]    param[0].value.b       LSB Buffer physical address
+ */
+#define PTA_SHM_FREE	 8
+
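+/*
+ * Illustrative only: the 64-bit size and physical address above are split
+ * across the two 32-bit value fields the same way reg_pair_to_64() /
+ * reg_pair_from_64() do in the PTA, i.e. value.a holds the upper 32 bits
+ * and value.b the lower 32 bits:
+ *
+ *	uint64_t pa = ((uint64_t)param[1].value.a << 32) | param[1].value.b;
+ */
+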
+/*
+ * Do AES CBC Cipher operation
+ *
+ * [in]     key_id   Key id
+ * [in]     iv       Initial vector physical address
+ * [in]     srcdata  Input buffer physical address
+ * [in]     srclen   Input buffer size
+ * [out]    dstdata  Output buffer physical address
+ * [out]    dstlen   Output buffer size
+ * [in]     encrypt  True for encryption, false otherwise
+ */
+TEE_Result cipher_cbc(uint32_t key_id, paddr_t iv, paddr_t srcdata,
+		      size_t srclen, paddr_t dstdata, size_t dstlen,
+		      bool encrypt);
+
+/*
+ * Do AES XTS Cipher operation
+ *
+ * [in]     key_id_1 First key id
+ * [in]     key_id_2 Second key id
+ * [in]     iv       Initial vector physical address
+ * [in]     srcdata  Input buffer physical address
+ * [in]     srclen   Input buffer size
+ * [out]    dstdata  Output buffer physical address
+ * [out]    dstlen   Output buffer size
+ * [in]     encrypt  True for encryption, false otherwise
+ */
+TEE_Result cipher_xts(uint32_t key_id_1, uint32_t key_id_2, paddr_t iv,
+		      paddr_t srcdata, size_t srclen, paddr_t dstdata,
+		      size_t dstlen, bool encrypt);
+
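+/*
+ * Raw AES helpers assumed to be backed by the Arm Cryptographic Extensions:
+ * rk/rk1/rk2 are expanded round-key schedules (see
+ * crypto_accel_aes_expand_keys()), rounds is the corresponding round count,
+ * blocks is the number of 16-byte AES blocks to process and iv is the
+ * initial vector (CBC) or tweak input (XTS).
+ */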
+void ce_aes_cbc_encrypt(uint8_t out[], uint8_t const in[], uint8_t const rk[],
+			int rounds, int blocks, uint8_t iv[]);
+void ce_aes_cbc_decrypt(uint8_t out[], uint8_t const in[], uint8_t const rk[],
+			int rounds, int blocks, uint8_t iv[]);
+void ce_aes_xts_encrypt(uint8_t out[], uint8_t const in[], uint8_t const rk1[],
+			int rounds, int blocks, uint8_t const rk2[],
+			uint8_t iv[]);
+void ce_aes_xts_decrypt(uint8_t out[], uint8_t const in[], uint8_t const rk1[],
+			int rounds, int blocks, uint8_t const rk2[],
+			uint8_t iv[]);
+
+#endif /* __PTA_IMX_TRUSTED_ARM_CE_H__ */
diff --git a/lib/libutils/isoc/bget_malloc.c b/lib/libutils/isoc/bget_malloc.c
index 1b26f93c8..01ef954af 100644
--- a/lib/libutils/isoc/bget_malloc.c
+++ b/lib/libutils/isoc/bget_malloc.c
@@ -930,6 +930,19 @@ void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
 		return;
 	}
 
+#ifdef CFG_COCKPIT
+	/*
+	 * Reset the buffer pool descriptors (global variables); this is
+	 * needed when a partition reboots without reloading the OP-TEE image.
+	 */
+	ctx->pool = (void *)0;
+	ctx->pool_len = 0;
+	ctx->poolset.freelist.bh.bsize = 0;
+	ctx->poolset.freelist.bh.prevfree = 0;
+	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
+	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
+#endif
+
 	tag_asan_free((void *)start, end - start);
 	bpool((void *)start, end - start, &ctx->poolset);
 	l = ctx->pool_len + 1;
diff --git a/mk/gcc.mk b/mk/gcc.mk
index adc77a24f..81bfa78ad 100644
--- a/mk/gcc.mk
+++ b/mk/gcc.mk
@@ -13,11 +13,11 @@ nostdinc$(sm)	:= -nostdinc -isystem $(shell $(CC$(sm)) \
 			-print-file-name=include 2> /dev/null)
 
 # Get location of libgcc from gcc
-libgcc$(sm)  	:= $(shell $(CC$(sm)) $(CFLAGS$(arch-bits-$(sm))) \
+libgcc$(sm)  	:= $(shell $(CC$(sm)) $(LIBGCC_LOCATE_CFLAGS) $(CFLAGS$(arch-bits-$(sm))) \
 			-print-libgcc-file-name 2> /dev/null)
-libstdc++$(sm)	:= $(shell $(CXX$(sm)) $(CXXFLAGS$(arch-bits-$(sm))) $(comp-cxxflags$(sm)) \
+libstdc++$(sm)	:= $(shell $(CXX$(sm)) $(LIBGCC_LOCATE_CFLAGS) $(CXXFLAGS$(arch-bits-$(sm))) $(comp-cxxflags$(sm)) \
 			-print-file-name=libstdc++.a 2> /dev/null)
-libgcc_eh$(sm)	:= $(shell $(CXX$(sm)) $(CXXFLAGS$(arch-bits-$(sm))) $(comp-cxxflags$(sm)) \
+libgcc_eh$(sm)	:= $(shell $(CXX$(sm)) $(LIBGCC_LOCATE_CFLAGS) $(CXXFLAGS$(arch-bits-$(sm))) $(comp-cxxflags$(sm)) \
 			-print-file-name=libgcc_eh.a 2> /dev/null)
 
 # Define these to something to discover accidental use
diff --git a/scripts/nxp_build.sh b/scripts/nxp_build.sh
new file mode 100755
index 000000000..9087510a6
--- /dev/null
+++ b/scripts/nxp_build.sh
@@ -0,0 +1,137 @@
+#!/bin/bash
+# Copyright 2022 NXP
+set -euo pipefail
+
+boards_list=(
+	imx-mx6dhmbedge \
+	imx-mx6dlsabreauto \
+	imx-mx6dlsabresd \
+	imx-mx6dlhmbedge \
+	imx-mx6qsabrelite \
+	imx-mx6qsabresd \
+	imx-mx6qsabreauto \
+	imx-mx6qhmbedge \
+	imx-mx6qpsabresd \
+	imx-mx6qpsabreauto \
+	imx-mx6shmbedge \
+	imx-mx6slevk \
+	imx-mx6sllevk \
+	imx-mx6solosabresd \
+	imx-mx6solosabreauto \
+	imx-mx6sxsabreauto \
+	imx-mx6sxsabresd \
+	imx-mx6sxudooneofull \
+	imx-mx6ulevk \
+	imx-mx6ul9x9evk \
+	imx-mx6ulccimx6ulsbcpro \
+	imx-mx6ullevk \
+	imx-mx6ulzevk \
+	imx-mx7dsabresd \
+	imx-mx7dpico_mbl \
+	imx-mx7swarp7 \
+	imx-mx7swarp7_mbl \
+	imx-mx7dclsom \
+	imx-mx7ulpevk \
+	imx-mx8dxmek \
+	imx-mx8mqevk \
+	imx-mx8mmevk \
+	imx-mx8mnevk \
+	imx-mx8mpevk \
+	imx-mx8qxpmek \
+	imx-mx8qmmek \
+	imx-mx8qmmekcockpita53 \
+	imx-mx8qmmekcockpita72 \
+	imx-mx8dxlevk \
+	imx-mx8ulpevk \
+	imx-mx93evk \
+	imx-mx91evk \
+	imx-mx95evk \
+	ls-ls1012ardb \
+	ls-ls1043ardb \
+	ls-ls1046ardb \
+	ls-ls1088ardb \
+	ls-ls2088ardb \
+	ls-lx2160ardb \
+)
+
+CROSS_COMPILE="${CROSS_COMPILE:-arm-linux-gnueabihf-}"
+CROSS_COMPILE64="${CROSS_COMPILE64:-aarch64-linux-gnu-}"
+O="${O:-.}"
+NB_CORES="${NB_CORES:-$(grep -c processor /proc/cpuinfo)}"
+CFG_TEE_CORE_LOG_LEVEL="${CFG_TEE_CORE_LOG_LEVEL:-0}"
+CFG_TEE_TA_LOG_LEVEL="${CFG_TEE_TA_LOG_LEVEL:-0}"
+
+function usage()
+{
+	cat << EOF
+Usage: $(basename "$0") [all] [list] [<board>...]
+	all          build all supported platforms
+	list         list the supported platforms
+	<board>      build the given platform(s)
+EOF
+}
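+
+# Example (assumed invocation from the top of the OP-TEE source tree):
+#   O=out CFG_TEE_CORE_LOG_LEVEL=2 ./scripts/nxp_build.sh imx-mx93evk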
+
+# Build the board given in parameter $1
+function build()
+{
+	local plat="$1"
+
+	# Generate the uTee binary for armv7 platforms
+	if [[ "$plat" == *mx[6-7]* ]];
+	then
+		# Build the default targets plus the uTee binary
+		make -j"$NB_CORES" \
+			CROSS_COMPILE="$CROSS_COMPILE" \
+			CROSS_COMPILE64="$CROSS_COMPILE64" \
+			CFG_TEE_CORE_LOG_LEVEL="$CFG_TEE_CORE_LOG_LEVEL" \
+			CFG_TEE_TA_LOG_LEVEL="$CFG_TEE_TA_LOG_LEVEL" \
+			CFG_WERROR=y \
+			PLATFORM="$plat" \
+			O="$O"/build."$plat" \
+			all uTee || exit 1
+	else
+		make -j"$NB_CORES" \
+			CROSS_COMPILE="$CROSS_COMPILE" \
+			CROSS_COMPILE64="$CROSS_COMPILE64" \
+			CFG_TEE_CORE_LOG_LEVEL="$CFG_TEE_CORE_LOG_LEVEL" \
+			CFG_TEE_TA_LOG_LEVEL="$CFG_TEE_TA_LOG_LEVEL" \
+			CFG_WERROR=y \
+			PLATFORM="$plat" \
+			O="$O"/build."$plat" \
+			all || exit 1
+	fi
+}
+
+function build_all()
+{
+	start=$(date +%s)
+	for b in "${boards_list[@]}"
+	do
+		echo "=============Building ""$b""================"
+
+		build "$b"
+	done
+	end=$(date +%s)
+	echo "Compilation time ""$((end-start))"" seconds"
+}
+
+function list_board()
+{
+	for b in "${boards_list[@]}"
+	do
+		echo "$b"
+	done
+}
+
+# Main
+[[ $# -eq 0 ]] && usage && exit 1
+[[ "$1" == "help" ]] && usage && exit 0
+[[ "$1" == "list" ]] && list_board && exit 0
+[[ "$1" == "all" ]] && build_all && exit 0
+
+for b in "$@"
+do
+	build "$b"
+done
+
+exit 0
