diff --git a/driver/src/devicedrv/mali/Kbuild b/driver/src/devicedrv/mali/Kbuild
index 02c3f3d..8c6e6e0 100644
--- a/driver/src/devicedrv/mali/Kbuild
+++ b/driver/src/devicedrv/mali/Kbuild
@@ -21,6 +21,7 @@ MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
 MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
 MALI_UPPER_HALF_SCHEDULING ?= 1
 MALI_ENABLE_CPU_CYCLES ?= 0
+HAVE_UNLOCKED_IOCTL ?= 1
 
 # For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
 # The ARM proprietary product will only include the license/proprietary directory
@@ -179,6 +180,9 @@ ccflags-y += -DMALI_STATE_TRACKING=1
 ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
 ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
 ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES)
+ifeq ($(HAVE_UNLOCKED_IOCTL),1)
+ccflags-y += -DHAVE_UNLOCKED_IOCTL
+endif
 
 ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
 	ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
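Note: the HAVE_UNLOCKED_IOCTL convenience define was removed from kernel headers in Linux 5.9, so the build option above now supplies it. The driver keys its ioctl entry point off this macro; a minimal sketch of the pattern in mali_kernel_linux.c (abridged; the signatures follow the kernel's old and new file_operations contracts):

	#ifdef HAVE_UNLOCKED_IOCTL
	static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
	#else
	static int mali_ioctl(struct inode *inode, struct file *filp,
			      unsigned int cmd, unsigned long arg);
	#endif

	struct file_operations mali_fops = {
		.owner = THIS_MODULE,
	#ifdef HAVE_UNLOCKED_IOCTL
		.unlocked_ioctl = mali_ioctl,	/* only entry point since 2.6.36 */
	#else
		.ioctl = mali_ioctl,		/* legacy BKL ioctl, long removed */
	#endif
	};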
diff --git a/driver/src/devicedrv/mali/Makefile b/driver/src/devicedrv/mali/Makefile
index b259c9e..d215cea 100644
--- a/driver/src/devicedrv/mali/Makefile
+++ b/driver/src/devicedrv/mali/Makefile
@@ -89,7 +89,11 @@ endif
 # Define host system directory
 KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
 
-include $(KDIR)/.config
+ifeq ($(O),)
+	-include $(KDIR)/.config
+else
+	-include $(O)/.config
+endif
 
 ifeq ($(ARCH), arm)
 # when compiling for ARM we're cross compiling
@@ -204,9 +208,12 @@ EXTRA_DEFINES += -DMALI_MEM_SWAP_TRACKING=1
 endif
 
 all: $(UMP_SYMVERS_FILE)
-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) O=$(O) modules
 	@rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
 
+modules_install:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) O=$(O) modules_install
+
 clean:
 	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
 
diff --git a/driver/src/devicedrv/mali/common/mali_control_timer.c b/driver/src/devicedrv/mali/common/mali_control_timer.c
index ab8cdf5..96dc5af 100644
--- a/driver/src/devicedrv/mali/common/mali_control_timer.c
+++ b/driver/src/devicedrv/mali/common/mali_control_timer.c
@@ -65,11 +65,17 @@ _mali_osk_errcode_t mali_control_timer_init(void)
 		}
 	}
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	mali_control_timer = _mali_osk_timer_init(mali_control_timer_callback);
+#else
 	mali_control_timer = _mali_osk_timer_init();
+#endif
 	if (NULL == mali_control_timer) {
 		return _MALI_OSK_ERR_FAULT;
 	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
 	_mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
+#endif
 
 	return _MALI_OSK_ERR_OK;
 }
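The #if ladder above tracks the 4.14 timer rework: init_timer()/setup_timer() and the `unsigned long data` callback argument were replaced by timer_setup() with a `void (*)(struct timer_list *)` callback, so the OSK wrapper must receive its callback at init time and _mali_osk_timer_setcallback() is compiled out on newer kernels. The same pattern recurs in mali_group.c and the _mali_osk_timer_* declarations below. A sketch of a plausible >= 4.14 backend (the wrapper layout and dispatch shown are an assumption, not the driver's verbatim code):

	#include <linux/timer.h>
	#include <linux/slab.h>

	typedef void (*_mali_osk_timer_callback_t)(void *);

	typedef struct _mali_osk_timer {
		struct timer_list timer;
		_mali_osk_timer_callback_t callback;
	} _mali_osk_timer_t;

	/* 4.14+ callbacks receive the timer_list; recover the wrapper with
	 * from_timer() (a container_of() helper) and forward to the
	 * old-style callback, passing the wrapper as its data. */
	static void _mali_osk_timer_dispatch(struct timer_list *t)
	{
		_mali_osk_timer_t *tim = from_timer(tim, t, timer);
		tim->callback(tim);
	}

	_mali_osk_timer_t *_mali_osk_timer_init(_mali_osk_timer_callback_t callback)
	{
		_mali_osk_timer_t *tim = kmalloc(sizeof(*tim), GFP_KERNEL);
		if (NULL != tim) {
			tim->callback = callback;
			timer_setup(&tim->timer, _mali_osk_timer_dispatch, 0);
		}
		return tim;
	}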
diff --git a/driver/src/devicedrv/mali/common/mali_group.c b/driver/src/devicedrv/mali/common/mali_group.c
index 71cf1c8..7a7dbfb 100644
--- a/driver/src/devicedrv/mali/common/mali_group.c
+++ b/driver/src/devicedrv/mali/common/mali_group.c
@@ -65,9 +65,15 @@ struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
 
 	group = _mali_osk_calloc(1, sizeof(struct mali_group));
 	if (NULL != group) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+		group->timeout_timer = _mali_osk_timer_init(mali_group_timeout);
+#else
 		group->timeout_timer = _mali_osk_timer_init();
+#endif
 		if (NULL != group->timeout_timer) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
 			_mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+#endif
 
 			group->l2_cache_core[0] = core;
 			_mali_osk_list_init(&group->group_list);
diff --git a/driver/src/devicedrv/mali/common/mali_osk.h b/driver/src/devicedrv/mali/common/mali_osk.h
index 7dad5de..0043132 100644
--- a/driver/src/devicedrv/mali/common/mali_osk.h
+++ b/driver/src/devicedrv/mali/common/mali_osk.h
@@ -947,7 +947,17 @@ _mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_
  * asked for.
  *
  * @{ */
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @param callback Function to call when timer expires
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(_mali_osk_timer_callback_t callback);
+#else
 /** @brief Initialize a timer
  *
  * Allocates resources for a new timer, and initializes them. This does not
@@ -956,6 +966,7 @@ _mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_
  * @return a pointer to the allocated timer object, or NULL on failure.
  */
 _mali_osk_timer_t *_mali_osk_timer_init(void);
+#endif
 
 /** @brief Start a timer
  *
@@ -1034,6 +1045,7 @@ void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
  */
 mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
 /** @brief Set a timer's callback parameters.
  *
  * This must be called at least once before a timer is started/modified.
@@ -1047,6 +1059,7 @@ mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
  * @param data Function-specific data to supply to the function on expiry.
  */
 void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
+#endif
 
 /** @brief Terminate a timer, and deallocate resources.
  *
diff --git a/driver/src/devicedrv/mali/common/mali_pm.c b/driver/src/devicedrv/mali/common/mali_pm.c
index a0f98da..a52916e 100644
--- a/driver/src/devicedrv/mali/common/mali_pm.c
+++ b/driver/src/devicedrv/mali/common/mali_pm.c
@@ -301,6 +301,7 @@ void mali_pm_init_end(void)
 	}
 
 	_mali_osk_pm_dev_ref_put();
+	_mali_osk_pm_dev_barrier();
 }
 
 void mali_pm_update_sync(void)
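_mali_osk_pm_dev_ref_put() drops its runtime-PM reference asynchronously, so a barrier is needed here to keep the deferred put from racing with the rest of bring-up. In the Linux OSK backend this is presumably a thin wrapper over the kernel's pm_runtime_barrier(), roughly:

	/* Sketch; assumes mali_platform_device is the driver's global
	 * platform_device as used elsewhere in linux/mali_osk_pm.c.
	 * Needs <linux/pm_runtime.h>. */
	void _mali_osk_pm_dev_barrier(void)
	{
		pm_runtime_barrier(&mali_platform_device->dev);
	}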
diff --git a/driver/src/devicedrv/mali/linux/mali_kernel_linux.c b/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
index 5799fa6..050af67 100644
--- a/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
+++ b/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
@@ -45,6 +45,14 @@
 #if defined(CONFIG_MALI400_INTERNAL_PROFILING)
 #include "mali_profiling_internal.h"
 #endif
+
+#if defined(CONFIG_ARCH_ZYNQMP)
+/* Clock handles for the ZynqMP GPU, file-scope only */
+static struct clk *clk_bus;
+static struct clk *clk_core;
+static mali_bool clk_enabled;
+#endif
+
 #if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
 #include "mali_osk_profiling.h"
 #include "mali_dvfs_policy.h"
@@ -273,6 +281,41 @@ struct file_operations mali_fops = {
 	.mmap = mali_mmap
 };
 
+static int mali_enable_clk(void)
+{
+#if defined(CONFIG_ARCH_ZYNQMP)
+	int err = 0;
+
+	if (clk_enabled)
+		return 0;
+
+	err = clk_prepare_enable(clk_bus);
+	if (err) {
+		MALI_PRINT_ERROR(("Could not enable GPU bus clock\n"));
+		return err;
+	}
+	err = clk_prepare_enable(clk_core);
+	if (err) {
+		MALI_PRINT_ERROR(("Could not enable GPU core clock\n"));
+		clk_disable_unprepare(clk_bus);
+		return err;
+	}
+	clk_enabled = MALI_TRUE;
+#endif
+	return 0;
+}
+
+static void mali_disable_clk(void)
+{
+#if defined(CONFIG_ARCH_ZYNQMP)
+	if (clk_enabled) {
+		clk_enabled = MALI_FALSE;
+		clk_disable_unprepare(clk_bus);
+		clk_disable_unprepare(clk_core);
+	}
+#endif
+}
+
 #if MALI_ENABLE_CPU_CYCLES
 void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
 {
@@ -580,7 +623,20 @@ static int mali_probe(struct platform_device *pdev)
 	}
 #endif
 
+#if defined(CONFIG_ARCH_ZYNQMP)
+	/* Look up the GPU bus and core clocks */
+	clk_bus = devm_clk_get(&pdev->dev, "bus");
+	if (IS_ERR(clk_bus))
+		return PTR_ERR(clk_bus);
+
+	clk_core = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(clk_core))
+		return PTR_ERR(clk_core);
+#endif
 
+	err = mali_enable_clk();
+	if (err)
+		return err;
 	if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
 		/* Initialize the Mali GPU HW specified by pdev */
 		if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
@@ -608,12 +664,13 @@
 		_mali_osk_wq_term();
 	}
 
 #ifdef CONFIG_MALI_DEVFREQ
 	mali_devfreq_term(mdev);
 devfreq_init_failed:
 	mali_pm_metrics_term(mdev);
 pm_metrics_init_failed:
 	clk_disable_unprepare(mdev->clock);
+	mali_disable_clk();
 clock_prepare_failed:
 	clk_put(mdev->clock);
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \
@@ -673,6 +730,12 @@ static int mali_remove(struct platform_device *pdev)
 	mali_platform_device_deinit(mali_platform_device);
 #endif
 	mali_platform_device = NULL;
+
+#if defined(CONFIG_ARCH_ZYNQMP)
+	/* Remove clock */
+	mali_disable_clk();
+#endif
+
 	return 0;
 }
 
@@ -778,6 +841,8 @@ static int mali_driver_runtime_suspend(struct device *dev)
 		devfreq_suspend_device(mdev->devfreq);
 #endif
 
+		mali_disable_clk();
+
 		return 0;
 	} else {
 		return -EBUSY;
@@ -786,6 +851,11 @@ static int mali_driver_runtime_suspend(struct device *dev)
 
 static int mali_driver_runtime_resume(struct device *dev)
 {
+	int err;
+
+	err = mali_enable_clk();
+	if (err)
+		return err;
 #ifdef CONFIG_MALI_DEVFREQ
 	struct mali_device *mdev = dev_get_drvdata(dev);
 	if (!mdev)
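One wrinkle in mali_driver_runtime_resume(): the added statements run before the `struct mali_device *mdev` declaration that follows under CONFIG_MALI_DEVFREQ, which trips -Wdeclaration-after-statement in kernel builds. A warning-free arrangement would hoist the declaration, as in this sketch (devfreq body abridged; the -ENODEV fallback is illustrative):

	static int mali_driver_runtime_resume(struct device *dev)
	{
	#ifdef CONFIG_MALI_DEVFREQ
		struct mali_device *mdev = dev_get_drvdata(dev);
	#endif
		int err;

		err = mali_enable_clk();
		if (err)
			return err;

	#ifdef CONFIG_MALI_DEVFREQ
		if (!mdev)
			return -ENODEV;
		/* ... devfreq/clock state restore as in the original body ... */
	#endif
		return 0;
	}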
diff --git a/driver/src/devicedrv/mali/linux/mali_linux_trace.h b/driver/src/devicedrv/mali/linux/mali_linux_trace.h
index 02b130b..9debd95 100644
--- a/driver/src/devicedrv/mali/linux/mali_linux_trace.h
+++ b/driver/src/devicedrv/mali/linux/mali_linux_trace.h
@@ -13,13 +13,11 @@
 
 #include <linux/types.h>
 
-#include <linux/stringify.h>
 #include <linux/tracepoint.h>
 
 #undef  TRACE_SYSTEM
 #define TRACE_SYSTEM mali
 #ifndef TRACEPOINTS_ENABLED
-#define TRACE_SYSTEM_STRING __stringfy(TRACE_SYSTEM)
 #endif
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE mali_linux_trace
diff --git a/driver/src/devicedrv/mali/linux/mali_memory.c b/driver/src/devicedrv/mali/linux/mali_memory.c
index c0f0982..2b2b209 100644
--- a/driver/src/devicedrv/mali/linux/mali_memory.c
+++ b/driver/src/devicedrv/mali/linux/mali_memory.c
@@ -70,7 +70,9 @@ static void mali_mem_vma_close(struct vm_area_struct *vma)
 	}
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
+static vm_fault_t mali_mem_vma_fault(struct vm_fault *vmf)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
 static int mali_mem_vma_fault(struct vm_fault *vmf)
 #else
 static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
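The extra branch follows two kernel-side changes to the ->fault hook: 4.11 dropped the vm_area_struct argument (it is reachable as vmf->vma), and 4.17 introduced the dedicated vm_fault_t return type. The handler is wired up unchanged, along the lines of this sketch of the hookup in mali_memory.c:

	/* Only the .fault signature varies with kernel version. */
	static struct vm_operations_struct mali_kernel_vm_ops = {
		.open  = mali_mem_vma_open,
		.close = mali_mem_vma_close,
		.fault = mali_mem_vma_fault,
	};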
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c b/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c
index 0c5b6c3..3d982b4 100644
--- a/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c
+++ b/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c
@@ -309,9 +309,14 @@ int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *v
 
 	list_for_each_entry(m_page, &block_mem->pfns, list) {
 		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
 
 		if (unlikely(0 != ret)) {
+#endif
 			return -EFAULT;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
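vm_insert_pfn() disappeared in 4.20; its replacement vmf_insert_pfn() reports status as a vm_fault_t (VM_FAULT_NOPAGE on success) rather than 0/-errno, hence the different error test on each side of the #if. The two conventions side by side, as a self-contained sketch (the helper name is hypothetical, not part of this driver):

	static int mali_insert_one_pfn(struct vm_area_struct *vma,
				       unsigned long addr, unsigned long pfn)
	{
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
		vm_fault_t ret = vmf_insert_pfn(vma, addr, pfn);
		if (ret & VM_FAULT_ERROR)	/* VM_FAULT_NOPAGE == success */
			return -EFAULT;
	#else
		int ret = vm_insert_pfn(vma, addr, pfn);
		if (0 != ret)			/* 0 == success, -errno on error */
			return -EFAULT;
	#endif
		return 0;
	}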
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_cow.c b/driver/src/devicedrv/mali/linux/mali_memory_cow.c
index f1d44fe..4ca124f 100644
--- a/driver/src/devicedrv/mali/linux/mali_memory_cow.c
+++ b/driver/src/devicedrv/mali/linux/mali_memory_cow.c
@@ -532,9 +532,14 @@ int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma
 		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
 		ret = vm_insert_page(vma, addr, page);
 		*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
 
 		if (unlikely(0 != ret)) {
+#endif
 			return ret;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
@@ -569,9 +574,14 @@ _mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bken
 
 	list_for_each_entry(m_page, &cow->pages, list) {
 		if ((count >= offset) && (count < offset + num)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
 
 			if (unlikely(0 != ret)) {
+#endif
 				if (count == offset) {
 					return _MALI_OSK_ERR_FAULT;
 				} else {
@@ -683,7 +693,11 @@ void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)
 		/*
 		* use ioremap to map src for BLOCK memory
 		*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+		src = ioremap(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
+#else
 		src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
+#endif
 		memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
 		iounmap(src);
 	}
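ioremap_nocache() was removed in Linux 5.6, by which point plain ioremap() was already uncached on every architecture, so the substitution above is behavior-preserving. Out-of-tree code often centralizes this in a compat shim instead of repeating the version check at each call site; a sketch (the macro name is hypothetical, not part of this driver):

	#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	#define mali_ioremap_nc(phys, size)	ioremap(phys, size)
	#else
	#define mali_ioremap_nc(phys, size)	ioremap_nocache(phys, size)
	#endif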
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_cow.c.orig b/driver/src/devicedrv/mali/linux/mali_memory_cow.c.orig
new file mode 100644
index 0000000..f1d44fe
--- /dev/null
+++ b/driver/src/devicedrv/mali/linux/mali_memory_cow.c.orig
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2013-2016, 2018 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#ifdef CONFIG_ARM
+#include <asm/outercache.h>
+#endif
+#include <asm/dma-mapping.h>
+
+#include "mali_memory.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_swap_alloc.h"
+
+/**
+* allocate pages for COW backend and flush cache
+*/
+static struct page *mali_mem_cow_alloc_page(void)
+
+{
+	mali_mem_os_mem os_mem;
+	struct mali_page_node *node;
+	struct page *new_page;
+
+	int ret = 0;
+	/* allocate pages from os mem */
+	ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);
+
+	if (ret) {
+		return NULL;
+	}
+
+	MALI_DEBUG_ASSERT(1 == os_mem.count);
+
+	node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);
+	new_page = node->page;
+	node->page = NULL;
+	list_del(&node->list);
+	kfree(node);
+
+	return new_page;
+}
+
+
+static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,
+		u32 target_offset,
+		u32 target_size)
+{
+	MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||
+			  MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type);
+
+	if (MALI_MEM_OS == target_bk->type) {
+		MALI_DEBUG_ASSERT(&target_bk->os_mem);
+		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);
+		return &target_bk->os_mem.pages;
+	} else if (MALI_MEM_COW == target_bk->type) {
+		MALI_DEBUG_ASSERT(&target_bk->cow_mem);
+		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);
+		return  &target_bk->cow_mem.pages;
+	} else if (MALI_MEM_BLOCK == target_bk->type) {
+		MALI_DEBUG_ASSERT(&target_bk->block_mem);
+		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);
+		return  &target_bk->block_mem.pfns;
+	} else if (MALI_MEM_SWAP == target_bk->type) {
+		MALI_DEBUG_ASSERT(&target_bk->swap_mem);
+		MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count);
+		return  &target_bk->swap_mem.pages;
+	}
+
+	return NULL;
+}
+
+/**
+* Do COW for os memory - support do COW for memory from bank memory
+* The range_start/size can be zero, which means it will call cow_modify_range
+* latter.
+* This function allocate new pages for COW backend from os mem for a modified range
+* It will keep the page which not in the modified range and Add ref to it
+*
+* @target_bk - target allocation's backend(the allocation need to do COW)
+* @target_offset - the offset in target allocation to do COW(for support COW  a memory allocated from memory_bank, 4K align)
+* @target_size - size of target allocation to do COW (for support memory bank)
+* @backend -COW backend
+* @range_start - offset of modified range (4K align)
+* @range_size - size of modified range
+*/
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
+		u32 target_offset,
+		u32 target_size,
+		mali_mem_backend *backend,
+		u32 range_start,
+		u32 range_size)
+{
+	mali_mem_cow *cow = &backend->cow_mem;
+	struct mali_page_node *m_page, *m_tmp, *page_node;
+	int target_page = 0;
+	struct page *new_page;
+	struct list_head *pages = NULL;
+
+	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
+
+	if (NULL == pages) {
+		MALI_DEBUG_PRINT_ERROR(("No memory page  need to cow ! \n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	MALI_DEBUG_ASSERT(0 == cow->count);
+
+	INIT_LIST_HEAD(&cow->pages);
+	mutex_lock(&target_bk->mutex);
+	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
+		/* add page from (target_offset,target_offset+size) to cow backend */
+		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
+		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {
+
+			/* allocate a new page node, alway use OS memory for COW */
+			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+
+			if (NULL == page_node) {
+				mutex_unlock(&target_bk->mutex);
+				goto error;
+			}
+
+			INIT_LIST_HEAD(&page_node->list);
+
+			/* check if in the modified range*/
+			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+				/* need to allocate a new page */
+				/* To simplify the case, All COW memory is allocated from os memory ?*/
+				new_page = mali_mem_cow_alloc_page();
+
+				if (NULL == new_page) {
+					kfree(page_node);
+					mutex_unlock(&target_bk->mutex);
+					goto error;
+				}
+
+				_mali_page_node_add_page(page_node, new_page);
+			} else {
+				/*Add Block memory case*/
+				if (m_page->type != MALI_PAGE_NODE_BLOCK) {
+					_mali_page_node_add_page(page_node, m_page->page);
+				} else {
+					page_node->type = MALI_PAGE_NODE_BLOCK;
+					_mali_page_node_add_block_item(page_node, m_page->blk_it);
+				}
+
+				/* add ref to this page */
+				_mali_page_node_ref(m_page);
+			}
+
+			/* add it to COW backend page list */
+			list_add_tail(&page_node->list, &cow->pages);
+			cow->count++;
+		}
+		target_page++;
+	}
+	mutex_unlock(&target_bk->mutex);
+	return _MALI_OSK_ERR_OK;
+error:
+	mali_mem_cow_release(backend, MALI_FALSE);
+	return _MALI_OSK_ERR_FAULT;
+}
+
+_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk,
+		u32 target_offset,
+		u32 target_size,
+		mali_mem_backend *backend,
+		u32 range_start,
+		u32 range_size)
+{
+	mali_mem_cow *cow = &backend->cow_mem;
+	struct mali_page_node *m_page, *m_tmp, *page_node;
+	int target_page = 0;
+	struct mali_swap_item *swap_item;
+	struct list_head *pages = NULL;
+
+	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
+	if (NULL == pages) {
+		MALI_DEBUG_PRINT_ERROR(("No swap memory page need to cow ! \n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	MALI_DEBUG_ASSERT(0 == cow->count);
+
+	INIT_LIST_HEAD(&cow->pages);
+	mutex_lock(&target_bk->mutex);
+
+	backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;
+
+	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
+		/* add page from (target_offset,target_offset+size) to cow backend */
+		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
+		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {
+
+			/* allocate a new page node, use swap memory for COW memory swap cowed flag. */
+			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);
+
+			if (NULL == page_node) {
+				mutex_unlock(&target_bk->mutex);
+				goto error;
+			}
+
+			/* check if in the modified range*/
+			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+				/* need to allocate a new page */
+				/* To simplify the case, All COW memory is allocated from os memory ?*/
+				swap_item = mali_mem_swap_alloc_swap_item();
+
+				if (NULL == swap_item) {
+					kfree(page_node);
+					mutex_unlock(&target_bk->mutex);
+					goto error;
+				}
+
+				swap_item->idx = mali_mem_swap_idx_alloc();
+
+				if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
+					MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n"));
+					kfree(page_node);
+					kfree(swap_item);
+					mutex_unlock(&target_bk->mutex);
+					goto error;
+				}
+
+				_mali_page_node_add_swap_item(page_node, swap_item);
+			} else {
+				_mali_page_node_add_swap_item(page_node, m_page->swap_it);
+
+				/* add ref to this page */
+				_mali_page_node_ref(m_page);
+			}
+
+			list_add_tail(&page_node->list, &cow->pages);
+			cow->count++;
+		}
+		target_page++;
+	}
+	mutex_unlock(&target_bk->mutex);
+
+	return _MALI_OSK_ERR_OK;
+error:
+	mali_mem_swap_release(backend, MALI_FALSE);
+	return _MALI_OSK_ERR_FAULT;
+
+}
+
+
+_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)
+{
+	if (node->type == MALI_PAGE_NODE_OS) {
+		return mali_mem_os_put_page(node->page);
+	} else if (node->type == MALI_PAGE_NODE_BLOCK) {
+		return mali_mem_block_unref_node(node);
+	} else if (node->type == MALI_PAGE_NODE_SWAP) {
+		return _mali_mem_swap_put_page_node(node);
+	} else
+		MALI_DEBUG_ASSERT(0);
+	return _MALI_OSK_ERR_FAULT;
+}
+
+
+/**
+* Modify a range of a exist COW backend
+* @backend -COW backend
+* @range_start - offset of modified range (4K align)
+* @range_size - size of modified range(in byte)
+*/
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
+		u32 range_start,
+		u32 range_size)
+{
+	mali_mem_allocation *alloc = NULL;
+	struct mali_session_data *session;
+	mali_mem_cow *cow = &backend->cow_mem;
+	struct mali_page_node *m_page, *m_tmp;
+	LIST_HEAD(pages);
+	struct page *new_page;
+	u32 count = 0;
+	s32 change_pages_nr = 0;
+	_mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+	if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	alloc = backend->mali_allocation;
+	MALI_DEBUG_ASSERT_POINTER(alloc);
+
+	session = alloc->session;
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
+	MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);
+
+	mutex_lock(&backend->mutex);
+
+	/* free pages*/
+	list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {
+
+		/* check if in the modified range*/
+		if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+		    (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+			if (MALI_PAGE_NODE_SWAP != m_page->type) {
+				new_page = mali_mem_cow_alloc_page();
+
+				if (NULL == new_page) {
+					goto error;
+				}
+				if (1 != _mali_page_node_get_ref_count(m_page))
+					change_pages_nr++;
+				/* unref old page*/
+				_mali_osk_mutex_wait(session->cow_lock);
+				if (_mali_mem_put_page_node(m_page)) {
+					__free_page(new_page);
+					_mali_osk_mutex_signal(session->cow_lock);
+					goto error;
+				}
+				_mali_osk_mutex_signal(session->cow_lock);
+				/* add new page*/
+				/* always use OS for COW*/
+				m_page->type = MALI_PAGE_NODE_OS;
+				_mali_page_node_add_page(m_page, new_page);
+			} else {
+				struct mali_swap_item *swap_item;
+
+				swap_item = mali_mem_swap_alloc_swap_item();
+
+				if (NULL == swap_item) {
+					goto error;
+				}
+
+				swap_item->idx = mali_mem_swap_idx_alloc();
+
+				if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
+					MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n"));
+					kfree(swap_item);
+					goto error;
+				}
+
+				if (1 != _mali_page_node_get_ref_count(m_page)) {
+					change_pages_nr++;
+				}
+
+				if (_mali_mem_put_page_node(m_page)) {
+					mali_mem_swap_free_swap_item(swap_item);
+					goto error;
+				}
+
+				_mali_page_node_add_swap_item(m_page, swap_item);
+			}
+		}
+		count++;
+	}
+	cow->change_pages_nr  = change_pages_nr;
+
+	MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);
+
+	/* ZAP cpu mapping(modified range), and do cpu mapping here if need */
+	if (NULL != alloc->cpu_mapping.vma) {
+		MALI_DEBUG_ASSERT(0 != alloc->backend_handle);
+		MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);
+		MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);
+
+		if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+			zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
+
+			ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start  + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE);
+
+			if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+				MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: cpu mapping failed !\n"));
+				ret =  _MALI_OSK_ERR_FAULT;
+			}
+		} else {
+			/* used to trigger page fault for swappable cowed memory. */
+			alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP;
+			alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP;
+
+			zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
+			/* delete this flag to let swappble is ummapped regard to stauct page not page frame. */
+			alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP;
+			alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP;
+		}
+	}
+
+error:
+	mutex_unlock(&backend->mutex);
+	return ret;
+
+}
+
+
+/**
+* Allocate pages for COW backend
+* @alloc  -allocation for COW allocation
+* @target_bk - target allocation's backend(the allocation need to do COW)
+* @target_offset - the offset in target allocation to do COW(for support COW  a memory allocated from memory_bank, 4K align)
+* @target_size - size of target allocation to do COW (for support memory bank)(in byte)
+* @backend -COW backend
+* @range_start - offset of modified range (4K align)
+* @range_size - size of modified range(in byte)
+*/
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
+				       u32 target_offset,
+				       u32 target_size,
+				       mali_mem_backend *backend,
+				       u32 range_start,
+				       u32 range_size)
+{
+	struct mali_session_data *session = backend->mali_allocation->session;
+
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* size & offset must be a multiple of the system page size */
+	if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check backend type */
+	MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
+
+	switch (target_bk->type) {
+	case MALI_MEM_OS:
+	case MALI_MEM_BLOCK:
+		return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+		break;
+	case MALI_MEM_COW:
+		if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
+			return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+		} else {
+			return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+		}
+		break;
+	case MALI_MEM_SWAP:
+		return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+		break;
+	case MALI_MEM_EXTERNAL:
+		/*NOT support yet*/
+		MALI_DEBUG_PRINT_ERROR(("External physical memory not supported ! \n"));
+		return _MALI_OSK_ERR_UNSUPPORTED;
+		break;
+	case MALI_MEM_DMA_BUF:
+		/*NOT support yet*/
+		MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported ! \n"));
+		return _MALI_OSK_ERR_UNSUPPORTED;
+		break;
+	case MALI_MEM_UMP:
+		/*NOT support yet*/
+		MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported ! \n"));
+		return _MALI_OSK_ERR_UNSUPPORTED;
+		break;
+	default:
+		/*Not support yet*/
+		MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported ! \n"));
+		return _MALI_OSK_ERR_UNSUPPORTED;
+		break;
+	}
+	return _MALI_OSK_ERR_OK;
+}
+
+
+/**
+* Map COW backend memory to mali
+* Support OS/BLOCK for mali_page_node
+*/
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)
+{
+	mali_mem_allocation *cow_alloc;
+	struct mali_page_node *m_page;
+	struct mali_session_data *session;
+	struct mali_page_directory *pagedir;
+	u32 virt, start;
+
+	cow_alloc = mem_bkend->mali_allocation;
+	virt = cow_alloc->mali_vma_node.vm_node.start;
+	start = virt;
+
+	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+	MALI_DEBUG_ASSERT_POINTER(cow_alloc);
+
+	session = cow_alloc->session;
+	pagedir = session->page_directory;
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+	list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {
+		if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {
+			dma_addr_t phys = _mali_page_node_get_dma_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+			MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+			mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,
+						MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+		}
+		virt += MALI_MMU_PAGE_SIZE;
+	}
+	return 0;
+}
+
+/**
+* Map COW backend to cpu
+* support OS/BLOCK memory
+*/
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+	mali_mem_cow *cow = &mem_bkend->cow_mem;
+	struct mali_page_node *m_page;
+	int ret;
+	unsigned long addr = vma->vm_start;
+	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
+
+	list_for_each_entry(m_page, &cow->pages, list) {
+		/* We should use vm_insert_page, but it does a dcache
+		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
+		ret = vm_insert_page(vma, addr, page);
+		*/
+		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+
+		if (unlikely(0 != ret)) {
+			return ret;
+		}
+		addr += _MALI_OSK_MALI_PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/**
+* Map some pages(COW backend) to CPU vma@vaddr
+*@ mem_bkend - COW backend
+*@ vma
+*@ vaddr -start CPU vaddr mapped to
+*@ num - max number of pages to map to CPU vaddr
+*/
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
+		struct vm_area_struct *vma,
+		unsigned long vaddr,
+		int num)
+{
+	mali_mem_cow *cow = &mem_bkend->cow_mem;
+	struct mali_page_node *m_page;
+	int ret;
+	int offset;
+	int count ;
+	unsigned long vstart = vma->vm_start;
+	count = 0;
+	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
+	MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);
+	MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
+	offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
+
+	list_for_each_entry(m_page, &cow->pages, list) {
+		if ((count >= offset) && (count < offset + num)) {
+			ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
+
+			if (unlikely(0 != ret)) {
+				if (count == offset) {
+					return _MALI_OSK_ERR_FAULT;
+				} else {
+					/* ret is EBUSY when page isn't in modify range, but now it's OK*/
+					return _MALI_OSK_ERR_OK;
+				}
+			}
+			vaddr += _MALI_OSK_MALI_PAGE_SIZE;
+		}
+		count++;
+	}
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+* Release COW backend memory
+* free it directly(put_page--unref page), not put into pool
+*/
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
+{
+	mali_mem_allocation *alloc;
+	struct mali_session_data *session;
+	u32 free_pages_nr = 0;
+	MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+	alloc = mem_bkend->mali_allocation;
+	MALI_DEBUG_ASSERT_POINTER(alloc);
+
+	session = alloc->session;
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {
+		/* Unmap the memory from the mali virtual address space. */
+		if (MALI_TRUE == is_mali_mapped)
+			mali_mem_os_mali_unmap(alloc);
+		/* free cow backend list*/
+		_mali_osk_mutex_wait(session->cow_lock);
+		free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);
+		_mali_osk_mutex_signal(session->cow_lock);
+
+		free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);
+
+		MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));
+	} else {
+		free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped);
+	}
+
+
+	MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
+			     free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+
+	mem_bkend->cow_mem.count = 0;
+	return free_pages_nr;
+}
+
+
+/* Dst node could os node or swap node. */
+void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)
+{
+	void *dst, *src;
+	struct page *dst_page;
+	dma_addr_t dma_addr;
+
+	MALI_DEBUG_ASSERT(src_node != NULL);
+	MALI_DEBUG_ASSERT(dst_node != NULL);
+	MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS
+			  || dst_node->type == MALI_PAGE_NODE_SWAP);
+
+	if (dst_node->type == MALI_PAGE_NODE_OS) {
+		dst_page = dst_node->page;
+	} else {
+		dst_page = dst_node->swap_it->page;
+	}
+
+	dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node),
+		       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	/* map it , and copy the content*/
+	dst = kmap_atomic(dst_page);
+
+	if (src_node->type == MALI_PAGE_NODE_OS ||
+	    src_node->type == MALI_PAGE_NODE_SWAP) {
+		struct page *src_page;
+
+		if (src_node->type == MALI_PAGE_NODE_OS) {
+			src_page = src_node->page;
+		} else {
+			src_page = src_node->swap_it->page;
+		}
+
+		/* Clear and invaliate cache */
+		/* In ARM architecture, speculative read may pull stale data into L1 cache
+		 * for kernel linear mapping page table. DMA_BIDIRECTIONAL could
+		 * invalidate the L1 cache so that following read get the latest data
+		*/
+		dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node),
+			       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+		src = kmap_atomic(src_page);
+		memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
+		kunmap_atomic(src);
+		dma_addr = dma_map_page(&mali_platform_device->dev, src_page,
+					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+		if (src_node->type == MALI_PAGE_NODE_SWAP) {
+			src_node->swap_it->dma_addr = dma_addr;
+		}
+	} else if (src_node->type == MALI_PAGE_NODE_BLOCK) {
+		/*
+		* use ioremap to map src for BLOCK memory
+		*/
+		src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
+		memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
+		iounmap(src);
+	}
+	kunmap_atomic(dst);
+	dma_addr = dma_map_page(&mali_platform_device->dev, dst_page,
+				0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	if (dst_node->type == MALI_PAGE_NODE_SWAP) {
+		dst_node->swap_it->dma_addr = dma_addr;
+	}
+}
+
+
+/*
+ * Allocate a page on demand when the CPU accesses it.
+ * This is used in the page fault handler.
+ */
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)
+{
+	struct page *new_page = NULL;
+	struct mali_page_node *new_node = NULL;
+	int i = 0;
+	struct mali_page_node *m_page, *found_node = NULL;
+	struct  mali_session_data *session = NULL;
+	mali_mem_cow *cow = &mem_bkend->cow_mem;
+	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+	MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);
+	MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand, offset_page = 0x%x\n", offset_page));
+
+	/* allocate new page here */
+	new_page = mali_mem_cow_alloc_page();
+	if (!new_page)
+		return _MALI_OSK_ERR_NOMEM;
+
+	new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+	if (!new_node) {
+		__free_page(new_page);
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	/* Find the page in the backend. */
+	list_for_each_entry(m_page, &cow->pages, list) {
+		if (i == offset_page) {
+			found_node = m_page;
+			break;
+		}
+		i++;
+	}
+	MALI_DEBUG_ASSERT(found_node);
+	if (NULL == found_node) {
+		__free_page(new_page);
+		kfree(new_node);
+		return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+	}
+
+	_mali_page_node_add_page(new_node, new_page);
+
+	/* Copy the src page's content to new page */
+	_mali_mem_cow_copy_page(found_node, new_node);
+
+	MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);
+	session = mem_bkend->mali_allocation->session;
+	MALI_DEBUG_ASSERT_POINTER(session);
+	if (1 != _mali_page_node_get_ref_count(found_node)) {
+		atomic_add(1, &session->mali_mem_allocated_pages);
+		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+		}
+		mem_bkend->cow_mem.change_pages_nr++;
+	}
+
+	_mali_osk_mutex_wait(session->cow_lock);
+	if (_mali_mem_put_page_node(found_node)) {
+		__free_page(new_page);
+		kfree(new_node);
+		_mali_osk_mutex_signal(session->cow_lock);
+		return _MALI_OSK_ERR_NOMEM;
+	}
+	_mali_osk_mutex_signal(session->cow_lock);
+
+	list_replace(&found_node->list, &new_node->list);
+
+	kfree(found_node);
+
+	/* Map to the GPU side. */
+	_mali_osk_mutex_wait(session->memory_lock);
+	mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);
+	_mali_osk_mutex_signal(session->memory_lock);
+	return _MALI_OSK_ERR_OK;
+}
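
The copy path in _mali_mem_cow_copy_page() keeps the CPU and GPU views of a page coherent by unmapping it for DMA before the CPU touches it and remapping it afterwards. A minimal sketch of that pattern for two OS pages, assuming the DMA address is stashed in page_private() as this driver does (helper name is hypothetical, error handling omitted):

    #include <linux/dma-mapping.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Sketch: CPU-copy one DMA-mapped page to another while keeping
     * cache coherency; illustrative, not driver API. */
    static void copy_dma_page(struct device *dev, struct page *dst,
    			  struct page *src)
    {
    	void *d, *s;

    	/* Hand ownership to the CPU; invalidates stale cache lines. */
    	dma_unmap_page(dev, page_private(dst), PAGE_SIZE, DMA_BIDIRECTIONAL);
    	dma_unmap_page(dev, page_private(src), PAGE_SIZE, DMA_BIDIRECTIONAL);

    	d = kmap_atomic(dst);
    	s = kmap_atomic(src);
    	memcpy(d, s, PAGE_SIZE);
    	kunmap_atomic(s);
    	kunmap_atomic(d);

    	/* Hand ownership back to the device; writes back the new data. */
    	set_page_private(src, dma_map_page(dev, src, 0, PAGE_SIZE,
    					   DMA_BIDIRECTIONAL));
    	set_page_private(dst, dma_map_page(dev, dst, 0, PAGE_SIZE,
    					   DMA_BIDIRECTIONAL));
    }
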
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_dma_buf.c b/driver/src/devicedrv/mali/linux/mali_memory_dma_buf.c
index 905cd8b..72e28a8 100644
--- a/driver/src/devicedrv/mali/linux/mali_memory_dma_buf.c
+++ b/driver/src/devicedrv/mali/linux/mali_memory_dma_buf.c
@@ -34,6 +34,10 @@
 #include "mali_memory_virtual.h"
 #include "mali_pp_job.h"
 
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0)
+MODULE_IMPORT_NS(DMA_BUF);
+#endif
+
 /*
  * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
  */
@@ -281,7 +285,11 @@ int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_ge
 	buf = dma_buf_get(fd);
 	if (IS_ERR_OR_NULL(buf)) {
 		MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
+		return PTR_ERR_OR_ZERO(buf);
+#else
 		return PTR_RET(buf);
+#endif
 	}
 
 	if (0 != put_user(buf->size, &user_arg->size)) {
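
PTR_RET() was eventually removed upstream, hence the 5.7 gate above; PTR_ERR_OR_ZERO() is a straight rename with the same semantics, roughly:

    #include <linux/err.h>

    /* Open-coded equivalent of PTR_ERR_OR_ZERO(). */
    static inline int ptr_err_or_zero(const void *ptr)
    {
    	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;	/* negative errno, or 0 */
    }

Here buf is already known to be an error pointer (or NULL), so the call simply propagates the encoded errno to the caller.
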
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c b/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c
index 7c90cfb..6b8e174 100644
--- a/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c
+++ b/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c
@@ -202,7 +202,9 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
 	/* Allocate new pages, if needed. */
 	for (i = 0; i < remaining; i++) {
 		dma_addr_t dma_addr;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+		gfp_t flags = __GFP_ZERO | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
 		gfp_t flags = __GFP_ZERO | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | __GFP_COLD;
 #else
 		gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD;
@@ -239,8 +241,10 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
 		/* Ensure page is flushed from CPU caches. */
 		dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
 					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
-		dma_unmap_page(&mali_platform_device->dev, dma_addr,
-			       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+		err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
+		if (likely(!err))
+			dma_unmap_page(&mali_platform_device->dev, dma_addr,
+				       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
 		dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
 					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
 
@@ -374,9 +378,15 @@ int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
 		ret = vm_insert_page(vma, addr, page);
 		*/
 		page = m_page->page;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, page_to_pfn(page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
 
 		if (unlikely(0 != ret)) {
+#endif
+
 			return -EFAULT;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
@@ -412,9 +422,14 @@ _mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bken
 
 			vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
 			if (mapping_page_num > 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+				ret = vmf_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
+				if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 				ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
 
 				if (unlikely(0 != ret)) {
+#endif
 					/*will return -EBUSY If the page has already been mapped into table, but it's OK*/
 					if (-EBUSY == ret) {
 						break;
@@ -434,10 +449,15 @@ _mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bken
 
 		list_for_each_entry(m_page, &os_mem->pages, list) {
 			if (count >= offset) {
-
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+				ret = vmf_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
+				if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 				ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
 
 				if (unlikely(0 != ret)) {
+#endif
 					/*will return -EBUSY If the page has already been mapped into table, but it's OK*/
 					if (-EBUSY == ret) {
 						break;
@@ -783,8 +803,11 @@ _mali_osk_errcode_t mali_mem_os_init(void)
 	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
 #endif
 
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0)
+	register_shrinker(&mali_mem_os_allocator.shrinker, "mali");
+#else
 	register_shrinker(&mali_mem_os_allocator.shrinker);
-
+#endif
 	return _MALI_OSK_ERR_OK;
 }
 
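
vm_insert_pfn() went away in 4.20; its replacement vmf_insert_pfn() returns a vm_fault_t bit-mask rather than an errno, which is why the error test changes to `VM_FAULT_ERROR & ret`. Note that vmf_insert_pfn() folds the old -EBUSY case (page already present) into its success value, so the explicit -EBUSY handling in the resize paths only runs on pre-4.20 kernels. The repeated #ifdef could be hidden behind one wrapper; a sketch (helper name is hypothetical):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/version.h>

    /* Hypothetical compat helper: insert one pfn, return 0 or -errno. */
    static int mali_insert_pfn_compat(struct vm_area_struct *vma,
    				  unsigned long addr, unsigned long pfn)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
    	return (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR) ?
    		-EFAULT : 0;
    #else
    	return vm_insert_pfn(vma, addr, pfn);	/* already 0 or -errno */
    #endif
    }
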
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_secure.c b/driver/src/devicedrv/mali/linux/mali_memory_secure.c
index 9032b96..eb87cd9 100644
--- a/driver/src/devicedrv/mali/linux/mali_memory_secure.c
+++ b/driver/src/devicedrv/mali/linux/mali_memory_secure.c
@@ -13,9 +13,17 @@
 #include "mali_memory_secure.h"
 #include "mali_osk.h"
 #include <linux/mutex.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+#include <linux/dma-direct.h>
+#else
 #include <linux/dma-mapping.h>
+#endif
 #include <linux/dma-buf.h>
 
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0)
+MODULE_IMPORT_NS(DMA_BUF);
+#endif
+
 _mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd)
 {
 	struct dma_buf *buf;
@@ -128,9 +136,14 @@ int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *
 		MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE);
 
 		for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, addr, PFN_DOWN(phys));
 
 			if (unlikely(0 != ret)) {
+#endif
 				return -EFAULT;
 			}
 			addr += _MALI_OSK_MALI_PAGE_SIZE;
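
The dma-buf exports moved into the DMA_BUF symbol namespace (the >5.15 gates here and in mali_memory_dma_buf.c), so every module that calls dma_buf_get()/dma_buf_attach() must import the namespace once; otherwise the build fails at modpost with a message along the lines of "module mali uses symbol dma_buf_get from namespace DMA_BUF, but does not import it". Minimal usage:

    #include <linux/dma-buf.h>
    #include <linux/module.h>
    #include <linux/version.h>

    #if LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 0)
    MODULE_IMPORT_NS(DMA_BUF);	/* once per module is enough */
    #endif
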
diff --git a/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c b/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
index 84f93d9..5a0a725 100644
--- a/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
+++ b/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
@@ -33,7 +33,11 @@ void _mali_osk_write_mem_barrier(void)
 
 mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description)
 {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+	return (mali_io_address)ioremap(phys, size);
+#else
 	return (mali_io_address)ioremap_nocache(phys, size);
+#endif
 }
 
 void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt)
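
ioremap_nocache() was removed in 5.6; it had long been identical to plain ioremap() on the architectures this driver targets, so the substitution is mechanical. Since the same #ifdef recurs three more times in arm.c below, a single shim would keep the call sites clean; a sketch (macro name is hypothetical):

    #include <linux/io.h>
    #include <linux/version.h>

    /* Hypothetical shim: uncached MMIO mapping on either side of 5.6. */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
    #define mali_ioremap_nc(phys, size)	ioremap((phys), (size))
    #else
    #define mali_ioremap_nc(phys, size)	ioremap_nocache((phys), (size))
    #endif
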
diff --git a/driver/src/devicedrv/mali/linux/mali_osk_mali.c b/driver/src/devicedrv/mali/linux/mali_osk_mali.c
index 63f0699..3f66132 100644
--- a/driver/src/devicedrv/mali/linux/mali_osk_mali.c
+++ b/driver/src/devicedrv/mali/linux/mali_osk_mali.c
@@ -72,26 +72,26 @@ int (*mali_gpu_reset_and_secure_mode_disable)(void) = NULL;
 #define MALI_OSK_RESOURCE_DMA_LOCATION 26
 
 static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
-	{.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
-	{.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
-	{.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
-	{.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
-	{.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
-	{.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
-	{.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
-	{.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
-	{.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
-	{.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
-	{.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
-	{.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
-	{.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
-	{.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
-	{.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
-	{.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
-	{.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
-	{.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU",},
-	{.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
-	{.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+	{.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "gp",},
+	{.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "gpmmu",},
+	{.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "pp0",},
+	{.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "ppmmu0",},
+	{.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "pp1",},
+	{.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "ppmmu1",},
+	{.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "pp2",},
+	{.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "ppmmu2",},
+	{.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "pp3",},
+	{.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "ppmmu3",},
+	{.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "pp4",},
+	{.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "ppmmu4",},
+	{.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "pp5",},
+	{.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "ppmmu5",},
+	{.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "pp6",},
+	{.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "ppmmu6",},
+	{.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "pp7",},
+	{.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "ppmmu",},
+	{.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "pp",},
+	{.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "pmu",},
 	{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
 	{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
 	{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,},
@@ -113,7 +113,7 @@ static int _mali_osk_get_compatible_name(const char **out_string)
 _mali_osk_errcode_t _mali_osk_resource_initialize(void)
 {
 	mali_bool mali_is_450 = MALI_FALSE, mali_is_470 = MALI_FALSE;
-	int i, pp_core_num = 0, l2_core_num = 0;
+	int i, pp_core_num = 0, l2_core_num = 0, irq = 0;
 	struct resource *res;
 	const char *compatible_name = NULL;
 
@@ -128,9 +128,9 @@ _mali_osk_errcode_t _mali_osk_resource_initialize(void)
 	}
 
 	for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) {
-		res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name);
-		if (res) {
-			mali_osk_resource_bank[i].irq = res->start;
+		irq = platform_get_irq_byname_optional(mali_platform_device, mali_osk_resource_bank[i].irq_name);
+		if (irq > 0) {
+			mali_osk_resource_bank[i].irq = irq;
 		} else {
 			mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
 		}
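
platform_get_irq_byname_optional() resolves the named interrupt straight to a Linux virq number (and stays silent when the entry is absent, unlike platform_get_irq_byname()), which is why the result is tested with `irq > 0` instead of a NULL check on a struct resource. It also pairs with the lower-cased irq_name values above, which match what a devicetree interrupt-names property typically contains. A usage sketch (helper name, handler, and IRQ name are placeholders):

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    /* Hypothetical helper: request an optional, DT-named IRQ. */
    static int request_named_irq(struct platform_device *pdev,
    			     const char *name, irq_handler_t handler,
    			     void *cookie)
    {
    	int irq = platform_get_irq_byname_optional(pdev, name);

    	if (irq <= 0)
    		return irq ? irq : -ENXIO;	/* absent or invalid */
    	return request_irq(irq, handler, 0, name, cookie);
    }
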
diff --git a/driver/src/devicedrv/mali/linux/mali_osk_time.c b/driver/src/devicedrv/mali/linux/mali_osk_time.c
index 03046a5..583d82b 100644
--- a/driver/src/devicedrv/mali/linux/mali_osk_time.c
+++ b/driver/src/devicedrv/mali/linux/mali_osk_time.c
@@ -46,14 +46,26 @@ void _mali_osk_time_ubusydelay(u32 usecs)
 
 u64 _mali_osk_time_get_ns(void)
 {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+	struct timespec64 tsval;
+	ktime_get_real_ts64(&tsval);
+	return (u64)timespec64_to_ns(&tsval);
+#else
 	struct timespec tsval;
 	getnstimeofday(&tsval);
 	return (u64)timespec_to_ns(&tsval);
+#endif
 }
 
 u64 _mali_osk_boot_time_get_ns(void)
 {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+	struct timespec64 tsval;
+	ktime_get_boottime_ts64(&tsval);
+	return (u64)timespec64_to_ns(&tsval);
+#else
 	struct timespec tsval;
 	get_monotonic_boottime(&tsval);
 	return (u64)timespec_to_ns(&tsval);
+#endif
 }
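
struct timespec and getnstimeofday()/get_monotonic_boottime() were removed as part of the y2038 cleanup; the timespec64 accessors are drop-in replacements here because the result is immediately folded to nanoseconds. On kernels that have them (ktime_get_boottime_ns() carries this name since around 5.3), the same values are available in a single call; a sketch:

    #include <linux/timekeeping.h>
    #include <linux/types.h>

    /* Single-call equivalents of the two functions above. */
    static u64 wall_clock_ns(void) { return ktime_get_real_ns(); }
    static u64 boot_clock_ns(void) { return ktime_get_boottime_ns(); }
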
diff --git a/driver/src/devicedrv/mali/linux/mali_osk_timers.c b/driver/src/devicedrv/mali/linux/mali_osk_timers.c
index 6e8bfe5..8b70060 100644
--- a/driver/src/devicedrv/mali/linux/mali_osk_timers.c
+++ b/driver/src/devicedrv/mali/linux/mali_osk_timers.c
@@ -21,13 +21,24 @@
 struct _mali_osk_timer_t_struct {
 	struct timer_list timer;
 };
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+typedef void (*timer_timeout_function_t)(struct timer_list *);
+#else
 typedef void (*timer_timeout_function_t)(unsigned long);
+#endif
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+_mali_osk_timer_t *_mali_osk_timer_init(_mali_osk_timer_callback_t callback)
+#else
 _mali_osk_timer_t *_mali_osk_timer_init(void)
+#endif
 {
 	_mali_osk_timer_t *t = (_mali_osk_timer_t *)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	if (NULL != t) timer_setup(&t->timer, (timer_timeout_function_t)callback, 0);
+#else
 	if (NULL != t) init_timer(&t->timer);
+#endif
 	return t;
 }
 
@@ -62,12 +73,14 @@ mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim)
 	return 1 == timer_pending(&(tim->timer));
 }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
 void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data)
 {
 	MALI_DEBUG_ASSERT_POINTER(tim);
 	tim->timer.data = (unsigned long)data;
 	tim->timer.function = (timer_timeout_function_t)callback;
 }
+#endif
 
 void _mali_osk_timer_term(_mali_osk_timer_t *tim)
 {
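
Since 4.14, timer callbacks receive the struct timer_list pointer itself, and per-timer data is recovered with from_timer() (a container_of() wrapper); the old .data/.function fields are gone, which is why _mali_osk_timer_setcallback() is compiled out. The function-pointer cast in _mali_osk_timer_init() works because struct timer_list is the first (and only) member of _mali_osk_timer_t_struct, so the argument the kernel passes doubles as the _mali_osk_timer_t pointer. The idiomatic upstream pattern, for reference:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct my_ctx {
    	struct timer_list timer;	/* need not be the first member */
    	int fired;
    };

    static void my_timeout(struct timer_list *t)
    {
    	struct my_ctx *ctx = from_timer(ctx, t, timer);

    	ctx->fired = 1;
    }

    static void my_arm(struct my_ctx *ctx)
    {
    	timer_setup(&ctx->timer, my_timeout, 0);
    	mod_timer(&ctx->timer, jiffies + HZ);
    }
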
diff --git a/driver/src/devicedrv/mali/linux/mali_ukk_mem.c b/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
index 4ec57dc..270bb6d 100644
--- a/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
+++ b/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
@@ -207,8 +207,13 @@ int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_
 	kargs.ctx = (uintptr_t)session_data;
 
 	/* Check if we can access the buffers */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+	if (!access_ok((const void __user *)kargs.dest, kargs.size)
+	    || !access_ok((const void __user *)kargs.src, kargs.size)) {
+#else
 	if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
 	    || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
+#endif
 		return -EINVAL;
 	}
 
@@ -266,7 +271,11 @@ int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mal
 		goto err_exit;
 
 	user_buffer = (void __user *)(uintptr_t)kargs.buffer;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+	if (!access_ok(user_buffer, kargs.size))
+#else
 	if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size))
+#endif
 		goto err_exit;
 
 	/* allocate temporary buffer (kernel side) to store mmu page table info */
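
access_ok() lost its first argument in 5.0; VERIFY_READ/VERIFY_WRITE had been ignored for years on the architectures this driver supports, so only the call sites change. A compat macro keeps them single-line; a sketch (macro name is hypothetical):

    #include <linux/uaccess.h>
    #include <linux/version.h>

    /* Hypothetical shim: two-argument access_ok() on every kernel. */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
    #define mali_access_ok(addr, size)	access_ok((addr), (size))
    #else
    #define mali_access_ok(addr, size)	access_ok(VERIFY_WRITE, (addr), (size))
    #endif
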
diff --git a/driver/src/devicedrv/mali/platform/arm/arm.c b/driver/src/devicedrv/mali/platform/arm/arm.c
index cba09cd..e468263 100644
--- a/driver/src/devicedrv/mali/platform/arm/arm.c
+++ b/driver/src/devicedrv/mali/platform/arm/arm.c
@@ -38,7 +38,9 @@
 static int mali_core_scaling_enable = 0;
 
 void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+#if !(defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP))
 static u32 mali_read_phys(u32 phys_addr);
+#endif
 #if defined(CONFIG_ARCH_REALVIEW)
 static void mali_write_phys(u32 phys_addr, u32 value);
 #endif
@@ -96,7 +98,11 @@ static int mali_secure_mode_init_juno(void)
 
 	MALI_DEBUG_ASSERT(NULL == secure_mode_mapped_addr);
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+	secure_mode_mapped_addr = ioremap(phys_addr_page, map_size);
+#else
 	secure_mode_mapped_addr = ioremap_nocache(phys_addr_page, map_size);
+#endif
 	if (NULL != secure_mode_mapped_addr) {
 		return mali_gpu_reset_and_secure_mode_disable_juno();
 	}
@@ -261,6 +267,10 @@ static struct mali_gpu_device_data mali_gpu_data = {
 	.dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
 	.dedicated_mem_size = 0x10000000, /* 256MB */
 #endif
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP)
+	.fb_start = 0x00000000,
+	.fb_size = 0xfffff000,
+#else
 #if defined(CONFIG_ARM64)
 	/* Some framebuffer drivers get the framebuffer dynamically, such as through GEM,
 	* in which the memory resource can't be predicted in advance.
@@ -271,6 +281,7 @@ static struct mali_gpu_device_data mali_gpu_data = {
 	.fb_start = 0xe0000000,
 	.fb_size = 0x01000000,
 #endif
+#endif /* CONFIG_ARCH_ZYNQ || CONFIG_ARCH_ZYNQMP */
 	.control_interval = 1000, /* 1000ms */
 	.utilization_callback = mali_gpu_utilization_callback,
 	.get_clock_info = NULL,
@@ -505,6 +516,11 @@ int mali_platform_device_init(struct platform_device *device)
 			mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
 		}
 	}
+#elif defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP)
+
+	MALI_DEBUG_PRINT(4, ("Registering Zynq/ZynqMP Mali-400 device\n"));
+	num_pp_cores = 2;
+
 #endif
 
 	/* After kernel 3.15 device tree will default set dev
@@ -517,8 +533,9 @@ int mali_platform_device_init(struct platform_device *device)
 	 */
 	if (!device->dev.dma_mask)
 		device->dev.dma_mask = &device->dev.coherent_dma_mask;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
 	device->dev.archdata.dma_ops = dma_ops;
-
+#endif
 	err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data));
 
 	if (0 == err) {
@@ -568,13 +585,18 @@ int mali_platform_device_deinit(struct platform_device *device)
 
 #endif /* CONFIG_MALI_DT */
 
+#if !(defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP))
 static u32 mali_read_phys(u32 phys_addr)
 {
 	u32 phys_addr_page = phys_addr & 0xFFFFE000;
 	u32 phys_offset    = phys_addr & 0x00001FFF;
 	u32 map_size       = phys_offset + sizeof(u32);
 	u32 ret = 0xDEADBEEF;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+	void *mem_mapped = ioremap(phys_addr_page, map_size);
+#else
 	void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+#endif
 	if (NULL != mem_mapped) {
 		ret = (u32)ioread32(((u8 *)mem_mapped) + phys_offset);
 		iounmap(mem_mapped);
@@ -582,6 +604,7 @@ static u32 mali_read_phys(u32 phys_addr)
 
 	return ret;
 }
+#endif
 
 #if defined(CONFIG_ARCH_REALVIEW)
 static void mali_write_phys(u32 phys_addr, u32 value)
@@ -589,7 +612,11 @@ static void mali_write_phys(u32 phys_addr, u32 value)
 	u32 phys_addr_page = phys_addr & 0xFFFFE000;
 	u32 phys_offset    = phys_addr & 0x00001FFF;
 	u32 map_size       = phys_offset + sizeof(u32);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+	void *mem_mapped = ioremap(phys_addr_page, map_size);
+#else
 	void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+#endif
 	if (NULL != mem_mapped) {
 		iowrite32(value, ((u8 *)mem_mapped) + phys_offset);
 		iounmap(mem_mapped);
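
The Zynq/ZynqMP branch sets fb_start = 0 and fb_size = 0xfffff000, a validation window covering essentially the whole 32-bit physical space: as with the CONFIG_ARM64 case above, the framebuffer is allocated dynamically (CMA/GEM), so its physical range cannot be predicted at build time. A hypothetical sketch of the kind of range check this window feeds (not the driver's actual validation function):

    #include <linux/types.h>

    struct phys_window { u32 start; u32 size; };

    /* Overflow-safe: is [phys, phys + size) inside the window? */
    static bool phys_range_in_window(const struct phys_window *w,
    				 u32 phys, u32 size)
    {
    	return phys >= w->start && size <= w->size &&
    	       phys - w->start <= w->size - size;
    }
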
