Diffstat (limited to 'drivers/gpu/mali/mali/common')
-rw-r--r--  drivers/gpu/mali/mali/common/mali_broadcast.c  132
-rw-r--r--  drivers/gpu/mali/mali/common/mali_broadcast.h  52
-rw-r--r--  drivers/gpu/mali/mali/common/mali_dlbu.c  209
-rw-r--r--  drivers/gpu/mali/mali/common/mali_dlbu.h  45
-rw-r--r--  drivers/gpu/mali/mali/common/mali_dma.c  202
-rw-r--r--  drivers/gpu/mali/mali/common/mali_dma.h  190
-rw-r--r--  drivers/gpu/mali/mali/common/mali_gp.c  355
-rw-r--r--  drivers/gpu/mali/mali/common/mali_gp.h  93
-rw-r--r--  drivers/gpu/mali/mali/common/mali_gp_job.c  131
-rw-r--r--  drivers/gpu/mali/mali/common/mali_gp_job.h  186
-rw-r--r--  drivers/gpu/mali/mali/common/mali_gp_scheduler.c  695
-rw-r--r--  drivers/gpu/mali/mali/common/mali_gp_scheduler.h  101
-rw-r--r--  drivers/gpu/mali/mali/common/mali_group.c  2049
-rw-r--r--  drivers/gpu/mali/mali/common/mali_group.h  322
-rw-r--r--  drivers/gpu/mali/mali/common/mali_hw_core.c  45
-rw-r--r--  drivers/gpu/mali/mali/common/mali_hw_core.h  100
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_common.h  185
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_core.c  1477
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_core.h  56
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.c  173
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.h  99
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_utilization.c  440
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_utilization.h  65
-rw-r--r--  drivers/gpu/mali/mali/common/mali_kernel_vsync.c  49
-rw-r--r--  drivers/gpu/mali/mali/common/mali_l2_cache.c  584
-rw-r--r--  drivers/gpu/mali/mali/common/mali_l2_cache.h  89
-rw-r--r--  drivers/gpu/mali/mali/common/mali_mem_validation.c  65
-rw-r--r--  drivers/gpu/mali/mali/common/mali_mem_validation.h  19
-rw-r--r--  drivers/gpu/mali/mali/common/mali_mmu.c  433
-rw-r--r--  drivers/gpu/mali/mali/common/mali_mmu.h  114
-rw-r--r--  drivers/gpu/mali/mali/common/mali_mmu_page_directory.c  484
-rw-r--r--  drivers/gpu/mali/mali/common/mali_mmu_page_directory.h  110
-rw-r--r--  drivers/gpu/mali/mali/common/mali_osk.h  1357
-rw-r--r--  drivers/gpu/mali/mali/common/mali_osk_bitops.h  162
-rw-r--r--  drivers/gpu/mali/mali/common/mali_osk_list.h  273
-rw-r--r--  drivers/gpu/mali/mali/common/mali_osk_mali.h  66
-rw-r--r--  drivers/gpu/mali/mali/common/mali_osk_profiling.h  141
-rw-r--r--  drivers/gpu/mali/mali/common/mali_osk_types.h  459
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pm.c  122
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pm.h  28
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pm_domain.c  241
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pm_domain.h  74
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pmu.c  406
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pmu.h  134
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pp.c  561
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pp.h  139
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pp_job.c  280
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pp_job.h  393
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pp_scheduler.c  2134
-rw-r--r--  drivers/gpu/mali/mali/common/mali_pp_scheduler.h  130
-rw-r--r--  drivers/gpu/mali/mali/common/mali_scheduler.c  112
-rw-r--r--  drivers/gpu/mali/mali/common/mali_scheduler.h  90
-rw-r--r--  drivers/gpu/mali/mali/common/mali_scheduler_types.h  34
-rw-r--r--  drivers/gpu/mali/mali/common/mali_session.c  81
-rw-r--r--  drivers/gpu/mali/mali/common/mali_session.h  94
-rw-r--r--  drivers/gpu/mali/mali/common/mali_soft_job.c  437
-rw-r--r--  drivers/gpu/mali/mali/common/mali_soft_job.h  189
-rw-r--r--  drivers/gpu/mali/mali/common/mali_spinlock_reentrant.c  77
-rw-r--r--  drivers/gpu/mali/mali/common/mali_spinlock_reentrant.h  70
-rw-r--r--  drivers/gpu/mali/mali/common/mali_timeline.c  1452
-rw-r--r--  drivers/gpu/mali/mali/common/mali_timeline.h  496
-rw-r--r--  drivers/gpu/mali/mali/common/mali_timeline_fence_wait.c  198
-rw-r--r--  drivers/gpu/mali/mali/common/mali_timeline_fence_wait.h  67
-rw-r--r--  drivers/gpu/mali/mali/common/mali_timeline_sync_fence.c  158
-rw-r--r--  drivers/gpu/mali/mali/common/mali_timeline_sync_fence.h  51
-rw-r--r--  drivers/gpu/mali/mali/common/mali_ukk.h  564
-rw-r--r--  drivers/gpu/mali/mali/common/mali_user_settings_db.c  147
-rw-r--r--  drivers/gpu/mali/mali/common/mali_user_settings_db.h  39
68 files changed, 20805 insertions, 0 deletions
diff --git a/drivers/gpu/mali/mali/common/mali_broadcast.c b/drivers/gpu/mali/mali/common/mali_broadcast.c
new file mode 100644
index 00000000..774f04ef
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_broadcast.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static const int bcast_unit_reg_size = 0x1000;
+static const int bcast_unit_addr_broadcast_mask = 0x0;
+static const int bcast_unit_addr_irq_override_mask = 0x4;
+
+struct mali_bcast_unit {
+ struct mali_hw_core hw_core;
+ u32 current_mask;
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_bcast_unit *bcast_unit = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+ MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+
+ bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+ if (NULL == bcast_unit) {
+ MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+ return NULL;
+ }
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size)) {
+ bcast_unit->current_mask = 0;
+ mali_bcast_reset(bcast_unit);
+
+ return bcast_unit;
+ } else {
+ MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n"));
+ }
+
+ _mali_osk_free(bcast_unit);
+
+ return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ mali_hw_core_delete(&bcast_unit->hw_core);
+ _mali_osk_free(bcast_unit);
+}
+
+/* Add the @group's broadcast id to the broadcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask |= (bcast_id); /* add PP core to broadcast */
+ broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+/* Remove the @group's broadcast id from the broadcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask &= ~((bcast_id << 16) | bcast_id);
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_broadcast_mask,
+ bcast_unit->current_mask);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_irq_override_mask,
+ bcast_unit->current_mask & 0xFF);
+}
+
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_broadcast_mask,
+ 0x0);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_irq_override_mask,
+ 0x0);
+}
diff --git a/drivers/gpu/mali/mali/common/mali_broadcast.h b/drivers/gpu/mali/mali/common/mali_broadcast.h
new file mode 100644
index 00000000..6c7472ce
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_broadcast.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 × (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ * setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-apply the cached mask to hardware. This needs to be called after the unit has been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Disable broadcast unit
+ *
+ * mali_bcast_enable must be called to re-enable the unit. Cores may not be
+ * added or removed when the unit is disabled.
+ */
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Re-enable broadcast unit
+ *
+ * This resets the masks to include the cores present when mali_bcast_disable was called.
+ */
+MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
+{
+ mali_bcast_reset(bcast_unit);
+}
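+
+/* Illustrative sketch (an editorial addition, not part of the original file):
+ * mali_bcast_add_group()/mali_bcast_remove_group() only update the cached
+ * mask; mali_bcast_reset() pushes it to hardware. A typical suspend cycle,
+ * assuming "bcast_unit" and "group" were created elsewhere:
+ *
+ *   mali_bcast_add_group(bcast_unit, group);
+ *   mali_bcast_reset(bcast_unit);
+ *
+ *   mali_bcast_disable(bcast_unit);
+ *   ...suspend / power-gate / resume...
+ *   mali_bcast_enable(bcast_unit);
+ */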
diff --git a/drivers/gpu/mali/mali/common/mali_dlbu.c b/drivers/gpu/mali/mali/common/mali_dlbu.c
new file mode 100644
index 00000000..905deb91
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_dlbu.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+mali_dma_addr mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = NULL;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+ MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+ 31:12 Physical address to the page used for the DLBU
+ 0 DLBU enable - setting this bit to 1 enables the AXI bus
+ between PPs and L2s; setting it to 0 disables the router and
+ no further transactions are sent to the DLBU */
+ MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address;
+ 31:12 Virtual address to the page used for the DLBU */
+ MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address;
+ 31:12 Virtual address to the tile list. This address is used when
+ calculating the call address sent to PP.*/
+ MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension;
+ 23:16 Number of tiles in Y direction-1
+ 7:0 Number of tiles in X direction-1 */
+ MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration;
+ 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+ 21:16 2^n number of tiles to be binned to one tile list in Y direction
+ 5:0 2^n number of tiles to be binned to one tile list in X direction */
+ MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions;
+ 31:24 start position in Y direction for group 1
+ 23:16 start position in X direction for group 1
+ 15:8 start position in Y direction for group 0
+ 7:0 start position in X direction for group 0 */
+ MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask;
+ 7 enable PP7 for load balancing
+ 6 enable PP6 for load balancing
+ 5 enable PP5 for load balancing
+ 4 enable PP4 for load balancing
+ 3 enable PP3 for load balancing
+ 2 enable PP2 for load balancing
+ 1 enable PP1 for load balancing
+ 0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum {
+ PP0ENABLE = 0,
+ PP1ENABLE,
+ PP2ENABLE,
+ PP3ENABLE,
+ PP4ENABLE,
+ PP5ENABLE,
+ PP6ENABLE,
+ PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 pp_cores_mask; /**< Mask of the PP cores whose operation is controlled by the DLBU;
+ see the MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr)) {
+ MALI_SUCCESS;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+ mali_mmu_release_table_page(mali_dlbu_phys_addr, mali_dlbu_cpu_addr);
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_dlbu_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+ if (NULL != core) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) {
+ core->pp_cores_mask = 0;
+ if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) {
+ return core;
+ }
+ MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ mali_dlbu_reset(dlbu);
+ mali_hw_core_delete(&dlbu->hw_core);
+ _mali_osk_free(dlbu);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+ u32 dlbu_registers[7];
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+ dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+ dlbu_registers[1] = MALI_DLBU_VIRT_ADDR;
+ dlbu_registers[2] = 0;
+ dlbu_registers[3] = 0;
+ dlbu_registers[4] = 0;
+ dlbu_registers[5] = 0;
+ dlbu_registers[6] = dlbu->pp_cores_mask;
+
+ /* write reset values to core registers */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+ err = _MALI_OSK_ERR_OK;
+
+ return err;
+}
+
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask |= bcast_id;
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id , dlbu->pp_cores_mask));
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask &= ~bcast_id;
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+ u32 *registers;
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(job);
+ registers = mali_pp_job_get_dlbu_registers(job);
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+ /* Write the four job-specific DLBU registers, i.e. all DLBU registers
+ * except the first two (written once at DLBU initialisation / reset) and
+ * the PP_ENABLE_MASK register */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+}
diff --git a/drivers/gpu/mali/mali/common/mali_dlbu.h b/drivers/gpu/mali/mali/common/mali_dlbu.h
new file mode 100644
index 00000000..d5436d3d
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_dlbu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+struct mali_dlbu_core;
+
+extern mali_dma_addr mali_dlbu_phys_addr;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/** @brief Called to update HW after DLBU state changed
+ *
+ * This function must be called after \a mali_dlbu_add_group or \a
+ * mali_dlbu_remove_group to write the updated mask to hardware, unless the
+ * same is accomplished by calling \a mali_dlbu_reset.
+ */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
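+
+/* Illustrative sketch (an editorial addition, not part of the original file):
+ * mali_dlbu_add_group() and mali_dlbu_remove_group() only update the cached
+ * PP core mask, so the change must be pushed to hardware explicitly:
+ *
+ *   mali_dlbu_add_group(dlbu, group);
+ *   mali_dlbu_update_mask(dlbu);    (or, equivalently, mali_dlbu_reset(dlbu))
+ */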
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_dma.c b/drivers/gpu/mali/mali/common/mali_dma.c
new file mode 100644
index 00000000..81478836
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_dma.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+#include "mali_dma.h"
+
+/**
+ * Size of the Mali-450 DMA unit registers in bytes.
+ */
+#define MALI450_DMA_REG_SIZE 0x08
+
+/**
+ * Value that appears in MEMSIZE if an error occurs when reading the command list.
+ */
+#define MALI450_DMA_BUS_ERR_VAL 0xffffffff
+
+/**
+ * Mali DMA registers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register.
+ */
+typedef enum mali_dma_register {
+
+ MALI450_DMA_REG_SOURCE_ADDRESS = 0x0000,
+ MALI450_DMA_REG_SOURCE_SIZE = 0x0004,
+} mali_dma_register;
+
+struct mali_dma_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_spinlock_t *lock; /**< Lock protecting access to DMA core */
+ mali_dma_pool pool; /**< Memory pool for command buffers */
+};
+
+static struct mali_dma_core *mali_global_dma_core = NULL;
+
+struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource)
+{
+ struct mali_dma_core *dma;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_dma_core);
+
+ dma = _mali_osk_malloc(sizeof(struct mali_dma_core));
+ if (dma == NULL) goto alloc_failed;
+
+ dma->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DMA_COMMAND);
+ if (NULL == dma->lock) goto lock_init_failed;
+
+ dma->pool = mali_dma_pool_create(MALI_DMA_CMD_BUF_SIZE, 4, 0);
+ if (NULL == dma->pool) goto dma_pool_failed;
+
+ err = mali_hw_core_create(&dma->hw_core, resource, MALI450_DMA_REG_SIZE);
+ if (_MALI_OSK_ERR_OK != err) goto hw_core_failed;
+
+ mali_global_dma_core = dma;
+ MALI_DEBUG_PRINT(2, ("Mali DMA: Created Mali APB DMA unit\n"));
+ return dma;
+
+ /* Error handling */
+
+hw_core_failed:
+ mali_dma_pool_destroy(dma->pool);
+dma_pool_failed:
+ _mali_osk_spinlock_term(dma->lock);
+lock_init_failed:
+ _mali_osk_free(dma);
+alloc_failed:
+ MALI_DEBUG_PRINT(2, ("Mali DMA: Failed to create APB DMA unit\n"));
+ return NULL;
+}
+
+void mali_dma_delete(struct mali_dma_core *dma)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma);
+
+ MALI_DEBUG_PRINT(2, ("Mali DMA: Deleted Mali APB DMA unit\n"));
+
+ mali_hw_core_delete(&dma->hw_core);
+ _mali_osk_spinlock_term(dma->lock);
+ mali_dma_pool_destroy(dma->pool);
+ _mali_osk_free(dma);
+}
+
+static void mali_dma_bus_error(struct mali_dma_core *dma)
+{
+ u32 addr = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS);
+
+ MALI_PRINT_ERROR(("Mali DMA: Bus error when reading command list from 0x%lx\n", addr));
+ MALI_IGNORE(addr);
+
+ /* Clear the bus error */
+ mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, 0);
+}
+
+static mali_bool mali_dma_is_busy(struct mali_dma_core *dma)
+{
+ u32 val;
+ mali_bool dma_busy_flag = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(dma);
+
+ val = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE);
+
+ if (MALI450_DMA_BUS_ERR_VAL == val) {
+ /* Bus error reading command list */
+ mali_dma_bus_error(dma);
+ return MALI_FALSE;
+ }
+ if (val > 0) {
+ dma_busy_flag = MALI_TRUE;
+ }
+
+ return dma_busy_flag;
+}
+
+static void mali_dma_start_transfer(struct mali_dma_core *dma, mali_dma_cmd_buf *buf)
+{
+ u32 memsize = buf->size * 4;
+ u32 addr = buf->phys_addr;
+
+ MALI_DEBUG_ASSERT_POINTER(dma);
+ MALI_DEBUG_ASSERT(memsize < (1 << 16));
+ MALI_DEBUG_ASSERT(0 == (memsize & 0x3)); /* 4 byte aligned */
+
+ MALI_DEBUG_ASSERT(!mali_dma_is_busy(dma));
+
+ /* Write the physical address of the chunk containing the command headers and data */
+ mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS, addr);
+
+ /* Write the length of the transfer */
+ mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, memsize);
+}
+
+_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf)
+{
+ MALI_DEBUG_ASSERT_POINTER(buf);
+
+ buf->virt_addr = (u32 *)mali_dma_pool_alloc(mali_global_dma_core->pool, &buf->phys_addr);
+ if (NULL == buf->virt_addr) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* size contains the number of words in the buffer and is incremented
+ * as commands are added to the buffer. */
+ buf->size = 0;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf)
+{
+ MALI_DEBUG_ASSERT_POINTER(buf);
+
+ if (NULL == buf->virt_addr) return;
+
+ mali_dma_pool_free(mali_global_dma_core->pool, buf->virt_addr, buf->phys_addr);
+
+ buf->virt_addr = NULL;
+}
+
+_mali_osk_errcode_t mali_dma_start(struct mali_dma_core *dma, mali_dma_cmd_buf *buf)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ _mali_osk_spinlock_lock(dma->lock);
+
+ if (mali_dma_is_busy(dma)) {
+ err = _MALI_OSK_ERR_BUSY;
+ goto out;
+ }
+
+ mali_dma_start_transfer(dma, buf);
+
+out:
+ _mali_osk_spinlock_unlock(dma->lock);
+ return err;
+}
+
+void mali_dma_debug(struct mali_dma_core *dma)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma);
+ MALI_DEBUG_PRINT(1, ("DMA unit registers:\n\t%08x, %08x\n",
+ mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS),
+ mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE)
+ ));
+}
+
+struct mali_dma_core *mali_dma_get_global_dma_core(void)
+{
+ /* Returns the global dma core object */
+ return mali_global_dma_core;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_dma.h b/drivers/gpu/mali/mali/common/mali_dma.h
new file mode 100644
index 00000000..1f7ab534
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_dma.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DMA_H__
+#define __MALI_DMA_H__
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_hw_core.h"
+
+#define MALI_DMA_CMD_BUF_SIZE 1024
+
+typedef struct mali_dma_cmd_buf {
+ u32 *virt_addr; /**< CPU address of command buffer */
+ mali_dma_addr phys_addr; /**< Physical address of command buffer */
+ u32 size; /**< Number of prepared words in command buffer */
+} mali_dma_cmd_buf;
+
+/** @brief Create a new DMA unit
+ *
+ * This is called from the entry point of the driver in order to create and
+ * initialize the DMA resource.
+ *
+ * @param resource Pointer to the DMA resource
+ * @return DMA object on success, NULL on failure
+ */
+struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource);
+
+/** @brief Delete DMA unit
+ *
+ * This is called from the entry point of the driver if driver initialization
+ * fails after the DMA unit has been initialized. It is also called on driver
+ * exit to delete the DMA resource.
+ *
+ * @param dma Pointer to DMA unit object
+ */
+void mali_dma_delete(struct mali_dma_core *dma);
+
+/** @brief Retrieves the Mali DMA core object (if one exists)
+ *
+ * @return The Mali DMA object, or NULL if none exists
+ */
+struct mali_dma_core *mali_dma_get_global_dma_core(void);
+
+/**
+ * @brief Run a command buffer on the DMA unit
+ *
+ * @param dma Pointer to the DMA unit to use
+ * @param buf Pointer to the command buffer to use
+ * @return _MALI_OSK_ERR_OK if the buffer was started successfully,
+ * _MALI_OSK_ERR_BUSY if the DMA unit is busy.
+ */
+_mali_osk_errcode_t mali_dma_start(struct mali_dma_core *dma, mali_dma_cmd_buf *buf);
+
+/**
+ * @brief Create a DMA command
+ *
+ * @param core Mali core
+ * @param reg offset to register of core
+ * @param n number of registers to write
+ */
+MALI_STATIC_INLINE u32 mali_dma_command_write(struct mali_hw_core *core, u32 reg, u32 n)
+{
+ u32 core_offset = core->phys_offset;
+
+ MALI_DEBUG_ASSERT(reg < 0x2000);
+ MALI_DEBUG_ASSERT(n < 0x800);
+ MALI_DEBUG_ASSERT(core_offset < 0x30000);
+ MALI_DEBUG_ASSERT(0 == ((core_offset + reg) & ~0x7FFFF));
+
+ return (n << 20) | (core_offset + reg);
+}
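+
+/* Illustrative note (an editorial addition, not part of the original file):
+ * the command word packs the write count into bits 30:20 and the target
+ * address (core offset + register) into bits 18:0. For example, with
+ * core->phys_offset == 0x10000, reg == 0x400 and n == 4, the encoded
+ * command is (4 << 20) | 0x10400 == 0x00410400. */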
+
+/**
+ * @brief Add an array write to the DMA command buffer
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ * @param count Number of 4 byte words to write
+ */
+MALI_STATIC_INLINE void mali_dma_write_array(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+ u32 reg, u32 *data, u32 count)
+{
+ MALI_DEBUG_ASSERT((buf->size + 1 + count) < MALI_DMA_CMD_BUF_SIZE / 4);
+
+ buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, count);
+
+ _mali_osk_memcpy(buf->virt_addr + buf->size, data, count * sizeof(*buf->virt_addr));
+
+ buf->size += count;
+}
+
+/**
+ * @brief Add a conditional array write to DMA command buffer
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ * @param count Number of 4 byte words to write
+ * @param ref Pointer to reference data that can be skipped if equal
+ */
+MALI_STATIC_INLINE void mali_dma_write_array_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+ u32 reg, u32 *data, u32 count, const u32 *ref)
+{
+ /* Conditional array writes are not yet implemented; fall back to a
+ * normal array write. */
+ mali_dma_write_array(buf, core, reg, data, count);
+}
+
+/**
+ * @brief Add a conditional register write to the DMA command buffer
+ *
+ * If the data matches the reference the command will be skipped.
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Data to write
+ * @param ref Reference value; the write is skipped if data equals it
+ */
+MALI_STATIC_INLINE void mali_dma_write_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+ u32 reg, u32 data, const u32 ref)
+{
+ /* Skip write if reference value is equal to data. */
+ if (data == ref) return;
+
+ buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
+
+ buf->virt_addr[buf->size++] = data;
+
+ MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
+}
+
+/**
+ * @brief Add a register write to the DMA command buffer
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Data to write
+ */
+MALI_STATIC_INLINE void mali_dma_write(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+ u32 reg, u32 data)
+{
+ buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
+
+ buf->virt_addr[buf->size++] = data;
+
+ MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
+}
+
+/**
+ * @brief Prepare DMA command buffer for use
+ *
+ * This function allocates the DMA buffer itself.
+ *
+ * @param buf The mali_dma_cmd_buf to prepare
+ * @return _MALI_OSK_ERR_OK if the \a buf is ready to use
+ */
+_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf);
+
+/**
+ * @brief Check if a DMA command buffer is ready for use
+ *
+ * @param buf The mali_dma_cmd_buf to check
+ * @return MALI_TRUE if buffer is usable, MALI_FALSE otherwise
+ */
+MALI_STATIC_INLINE mali_bool mali_dma_cmd_buf_is_valid(mali_dma_cmd_buf *buf)
+{
+ return NULL != buf->virt_addr;
+}
+
+/**
+ * @brief Return a DMA command buffer
+ *
+ * @param buf Pointer to DMA command buffer to return
+ */
+void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf);
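+
+/* Illustrative sketch (an editorial addition, not part of the original file):
+ * typical life cycle of a command buffer, where "dma" is the core returned
+ * by mali_dma_get_global_dma_core() and "core"/"REG"/"VAL" are placeholders:
+ *
+ *   mali_dma_cmd_buf buf;
+ *
+ *   if (_MALI_OSK_ERR_OK == mali_dma_get_cmd_buf(&buf)) {
+ *       mali_dma_write(&buf, core, REG, VAL);
+ *       if (_MALI_OSK_ERR_OK != mali_dma_start(dma, &buf)) {
+ *           ...the unit was busy; fall back to programmed I/O...
+ *       }
+ *       ...keep buf allocated until the transfer has completed...
+ *       mali_dma_put_cmd_buf(&buf);
+ *   }
+ */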
+
+#endif /* __MALI_DMA_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_gp.c b/drivers/gpu/mali/mali/common/mali_gp.c
new file mode 100644
index 00000000..1e633f24
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_gp.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
+{
+ struct mali_gp_core *core = NULL;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+ MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+ if (NULL != core) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
+ _mali_osk_errcode_t ret;
+
+ ret = mali_gp_reset(core);
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ ret = mali_group_add_gp_core(group, core);
+ if (_MALI_OSK_ERR_OK == ret) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_gp,
+ group,
+ mali_gp_irq_probe_trigger,
+ mali_gp_irq_probe_ack,
+ core,
+ resource->description);
+ if (NULL != core->irq) {
+ MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+ mali_global_gp_core = core;
+
+ return core;
+ } else {
+ MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+ }
+ mali_group_remove_gp_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_gp_delete(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+ mali_global_gp_core = NULL;
+ _mali_osk_free(core);
+}
+
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Send the stop bus command. */
+ mali_gp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+}
+
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+}
+
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+ int i;
+ u32 rawstat = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+ if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) {
+ break;
+ }
+ }
+
+ if (i == MALI_REG_POLL_COUNT_FAST) {
+ MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+ core->hw_core.description, rawstat));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+ mali_gp_reset_async(core);
+ return mali_gp_reset_wait(core);
+}
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+ u32 startcmd = 0;
+ u32 *frame_registers = mali_gp_job_get_frame_registers(job);
+ u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+ u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ if (mali_gp_job_has_vs_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if (mali_gp_job_has_plbu_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ MALI_DEBUG_ASSERT(0 != startcmd);
+
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core.
+ *
+ * Don't actually run the job if PROFILING_SKIP_GP_JOBS is set; just
+ * force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_GP_JOBS)
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+#else
+ {
+ u32 bits = 0;
+
+ if (mali_gp_job_has_vs_job(job))
+ bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ if (mali_gp_job_has_plbu_job(job))
+ bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+
+ mali_hw_core_register_write_relaxed(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits);
+ }
+#endif
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+}
+
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+ u32 irq_readout;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+ if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+ _mali_osk_write_mem_barrier();
+ }
+ /*
+ * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+ * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+ */
+}
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+ return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+ if (MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+ return n;
+}
+#endif
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+{
+ u32 val0 = 0;
+ u32 val1 = 0;
+ u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+ u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0);
+#endif
+
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ mali_gp_job_set_perf_counter_value1(job, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1);
+#endif
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_gp.h b/drivers/gpu/mali/mali/common/mali_gp.h
new file mode 100644
index 00000000..ee5a44d7
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_gp.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+
+/*** Accessor functions ***/
+MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+{
+ return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+}
+
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+{
+ /* Enable all interrupts, except those specified in irq_exceptions */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
+ MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
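+
+/* Illustrative sketch (an editorial addition, not part of the original file):
+ * the reset API is split so a caller can overlap the reset latency with
+ * other work; mali_gp_reset() is just the synchronous composition:
+ *
+ *   mali_gp_reset_async(core);
+ *   ...do unrelated work while the soft reset completes...
+ *   if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(core)) {
+ *       ...handle the failure, e.g. by escalating to mali_gp_hard_reset()...
+ *   }
+ */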
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_gp_job.c b/drivers/gpu/mali/mali/common/mali_gp_job.c
new file mode 100644
index 00000000..1d3d9429
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_gp_job.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
+{
+ struct mali_gp_job *job;
+ u32 perf_counter_flag;
+
+ job = _mali_osk_malloc(sizeof(struct mali_gp_job));
+ if (NULL != job) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+ if (NULL == job->finished_notification) {
+ _mali_osk_free(job);
+ return NULL;
+ }
+
+ job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+ if (NULL == job->oom_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ _mali_osk_free(job);
+ return NULL;
+ }
+
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
+ _mali_osk_notification_delete(job->finished_notification);
+ _mali_osk_notification_delete(job->oom_notification);
+ _mali_osk_free(job);
+ return NULL;
+ }
+
+ perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+ /* If no counters were specified by user space, fall back to the
+ * global ones provided via debugfs / DS-5 */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+ mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+ }
+
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ job->id = id;
+ job->heap_current_addr = job->uargs.frame_registers[4];
+ job->perf_counter_value0 = 0;
+ job->perf_counter_value1 = 0;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+ job->pp_tracker = pp_tracker;
+ if (NULL != job->pp_tracker) {
+ /* Take a reference on PP job's tracker that will be released when the GP
+ job is done. */
+ mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ return job;
+ }
+
+ return NULL;
+}
+
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+
+ /* De-allocate the pre-allocated notifications, if present */
+ if (NULL != job->oom_notification) {
+ _mali_osk_notification_delete(job->oom_notification);
+ job->oom_notification = NULL;
+ }
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ job->finished_notification = NULL;
+ }
+
+ _mali_osk_free(job);
+}
+
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+ return gp_counter_src0;
+}
+
+void mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+ gp_counter_src0 = counter;
+}
+
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+ return gp_counter_src1;
+}
+
+void mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+ gp_counter_src1 = counter;
+}
+
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (NULL != job->pp_tracker) {
+ schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success);
+ job->pp_tracker = NULL;
+ }
+
+ return schedule_mask;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_gp_job.h b/drivers/gpu/mali/mali/common/mali_gp_job.h
new file mode 100644
index 00000000..3b525aa5
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_gp_job.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_timeline.h"
+#include "mali_scheduler_types.h"
+
+/**
+ * This structure represents a GP job.
+ * (It unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works.)
+ */
+struct mali_gp_job {
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ _mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+ u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+ u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+void mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+void mali_gp_job_set_gp_counter_src1(u32 counter);
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
+{
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+ return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+ return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+ return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+ job->heap_current_addr = heap_addr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+ return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+ return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+ return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+ return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+ return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+ job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+ job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value1 = value;
+}
+
+/**
+ * Returns MALI_TRUE if first job is after the second job, ordered by job ID.
+ *
+ * @param first First job.
+ * @param second Second job.
+ * @return MALI_TRUE if first job should be ordered after the second job, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_gp_job_is_after(struct mali_gp_job *first, struct mali_gp_job *second)
+{
+ /* A span is used to handle job ID wrapping. */
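+	/* Example: with first id == 2 and second id == 0xFFFFFFFEu, the unsigned
+	 * difference is 4, which is below the span, so the first job is correctly
+	 * ordered after the second even though its raw id is numerically smaller. */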
+ return (mali_gp_job_get_id(first) - mali_gp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN;
+}
+
+/**
+ * Release reference on tracker for PP job that depends on this GP job.
+ *
+ * @note If GP job has a reference on tracker, this function MUST be called before the GP job is
+ * deleted.
+ *
+ * @param job GP job that is done.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @return A scheduling bitmask indicating whether scheduling needs to be done.
+ */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success);
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_gp_scheduler.c b/drivers/gpu/mali/mali/common/mali_gp_scheduler.c
new file mode 100644
index 00000000..4a3e9a48
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_gp_scheduler.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_gp.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+enum mali_gp_slot_state {
+ MALI_GP_SLOT_STATE_IDLE,
+ MALI_GP_SLOT_STATE_WORKING,
+ MALI_GP_SLOT_STATE_DISABLED,
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_gp_slot {
+ struct mali_group *group;
+ /*
+	 * We keep track of the state here as well as in the group object so we
+	 * don't need to take the group lock so often (and to keep the working
+	 * state out of that lock).
+ */
+ enum mali_gp_slot_state state;
+ u32 returned_cookie;
+};
+
+static u32 gp_version = 0;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue); /* List of unscheduled jobs. */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue_high); /* List of unscheduled high priority jobs. */
+static struct mali_gp_slot slot;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static mali_bool mali_gp_scheduler_is_suspended(void *data);
+static void mali_gp_scheduler_job_queued(void);
+static void mali_gp_scheduler_job_completed(void);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+static _mali_osk_spinlock_irq_t *gp_scheduler_lock = NULL;
+#else
+static _mali_osk_spinlock_t *gp_scheduler_lock = NULL;
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
+{
+ u32 num_groups;
+ u32 i;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ gp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#else
+ gp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ if (NULL == gp_scheduler_lock) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto cleanup;
+ }
+
+ gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == gp_scheduler_working_wait_queue) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto cleanup;
+ }
+
+ /* Find all the available GP cores */
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+ MALI_DEBUG_ASSERT(NULL != group);
+ if (NULL != group) {
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core) {
+ if (0 == gp_version) {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+ slot.group = group;
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+ break; /* There is only one GP, no point in looking for more */
+ }
+ } else {
+ ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ goto cleanup;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+
+cleanup:
+ if (NULL != gp_scheduler_working_wait_queue) {
+ _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+ gp_scheduler_working_wait_queue = NULL;
+ }
+
+ if (NULL != gp_scheduler_lock) {
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_term(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_term(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ gp_scheduler_lock = NULL;
+ }
+
+ return ret;
+}
+
+void mali_gp_scheduler_terminate(void)
+{
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state
+ || MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ MALI_DEBUG_ASSERT_POINTER(slot.group);
+ mali_group_delete(slot.group);
+
+ _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_term(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_term(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_lock(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_lock(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
+}
+
+MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_unlock(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_unlock(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+#if defined(DEBUG)
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock)
+#else
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() do {} while (0)
+#endif /* defined(DEBUG) */
+
+/* Group and scheduler must be locked when entering this function. Both will be unlocked before
+ * exiting. */
+static void mali_gp_scheduler_schedule_internal_and_unlock(void)
+{
+ struct mali_gp_job *job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(slot.group->lock);
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+ if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state ||
+ (_mali_osk_list_empty(&job_queue) && _mali_osk_list_empty(&job_queue_high))) {
+ mali_gp_scheduler_unlock();
+ mali_group_unlock(slot.group);
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+ pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+		trace_gpu_sched_switch(mali_gp_get_hw_core_desc(slot.group->gp_core), sched_clock(), 0, 0, 0);
+#endif
+ return; /* Nothing to do, so early out */
+ }
+
+ /* Get next job in queue */
+ if (!_mali_osk_list_empty(&job_queue_high)) {
+ job = _MALI_OSK_LIST_ENTRY(job_queue_high.next, struct mali_gp_job, list);
+ } else {
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job_queue));
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Remove the job from queue */
+ _mali_osk_list_del(&job->list);
+
+ /* Mark slot as busy */
+ slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+ mali_gp_scheduler_unlock();
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+
+ mali_group_start_gp_job(slot.group, job);
+ mali_group_unlock(slot.group);
+}
+
+void mali_gp_scheduler_schedule(void)
+{
+ mali_group_lock(slot.group);
+ mali_gp_scheduler_lock();
+
+ mali_gp_scheduler_schedule_internal_and_unlock();
+}
+
+static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
+{
+ _mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
+ job->finished_notification = NULL;
+
+ mali_gp_job_delete(job);
+ mali_gp_scheduler_job_completed();
+}
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT(slot.group == group);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
+
+ /* Release tracker. */
+ schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+ /* Signal PP job. */
+ schedule_mask |= mali_gp_job_signal_pp_tracker(job, success);
+
+ mali_gp_scheduler_lock();
+
+ /* Mark slot as idle again */
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+
+ /* If paused, then this was the last job, so wake up sleeping workers */
+ if (pause_count > 0) {
+ _mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
+ }
+
+ /* Schedule any queued GP jobs on this group. */
+ mali_gp_scheduler_schedule_internal_and_unlock();
+
+	/* The GP has now been scheduled, so remove it from the mask. */
+ schedule_mask &= ~MALI_SCHEDULER_MASK_GP;
+
+ if (MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
+ /* Releasing the tracker activated other jobs that need scheduling. */
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ }
+
+	/* Send the job end message to user space and free the job object */
+ mali_gp_scheduler_return_job_to_user(job, success);
+}
+
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
+{
+ _mali_uk_gp_job_suspended_s *jobres;
+ _mali_osk_notification_t *notification;
+
+ mali_gp_scheduler_lock();
+
+ notification = job->oom_notification;
+ job->oom_notification = NULL;
+ slot.returned_cookie = mali_gp_job_get_id(job);
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->cookie = mali_gp_job_get_id(job);
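+	/* User space echoes this cookie back in _mali_ukk_gp_suspend_response(),
+	 * where it is matched against the running job before the job is resumed
+	 * or aborted. */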
+
+ mali_gp_scheduler_unlock();
+
+ mali_session_send_notification(mali_gp_job_get_session(job), notification);
+
+ /*
+	 * If sending the notification failed, we could return the job to user space
+	 * right away, but there is a job timeout timer that will do that eventually.
+	 * This is not a common case.
+ */
+}
+
+void mali_gp_scheduler_suspend(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_gp_scheduler_unlock();
+
+ _mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended, NULL);
+}
+
+void mali_gp_scheduler_resume(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ mali_gp_scheduler_unlock();
+ if (0 == pause_count) {
+ mali_gp_scheduler_schedule();
+ }
+}
+
+mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* We hold a PM reference for every job we hold queued (and running) */
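+	/* The reference is released in mali_gp_scheduler_job_completed() when the
+	 * job finishes, or directly in mali_gp_scheduler_activate_job() if the
+	 * session is aborting when the job is activated. */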
+ _mali_osk_pm_dev_ref_add();
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_GP);
+
+ return point;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+ mali_timeline_point point;
+ u32 __user *timeline_point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)ctx;
+
+ job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(), NULL);
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ timeline_point_ptr = (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
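+	/* The timeline point is copied back to user space below, so that user
+	 * space can wait on the job's completion point. */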
+
+ point = mali_gp_scheduler_submit_job(session, job);
+
+ if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
+ /* Let user space know that something failed after the job was started. */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)(uintptr_t)args->ctx);
+
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)(uintptr_t)args->ctx);
+
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_gp_job *resumed_job;
+ _mali_osk_notification_t *new_notification = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL == new_notification) {
+ MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
+ mali_group_lock(slot.group);
+ mali_group_abort_gp_job(slot.group, args->cookie);
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ mali_group_lock(slot.group);
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
+
+ resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
+ if (NULL != resumed_job) {
+ resumed_job->oom_notification = new_notification;
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_OK;
+ } else {
+ mali_group_unlock(slot.group);
+ _mali_osk_notification_delete(new_notification);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
+ mali_group_abort_gp_job(slot.group, args->cookie);
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *job, *tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08X.\n", session));
+
+ mali_gp_scheduler_lock();
+
+ /* Find all jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list) {
+ if (job->session == session) {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
+ _mali_osk_list_move(&job->list, &removed_jobs);
+ }
+ }
+
+ /* Find all high priority jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue_high, struct mali_gp_job, list) {
+ if (job->session == session) {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
+ _mali_osk_list_move(&job->list, &removed_jobs);
+ }
+ }
+
+ mali_gp_scheduler_unlock();
+
+ /* Release and delete all found jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &removed_jobs, struct mali_gp_job, list) {
+ mali_timeline_tracker_release(&job->tracker);
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+ mali_gp_job_delete(job);
+ mali_gp_scheduler_job_completed();
+ }
+
+ /* Abort any running jobs from the session. */
+ mali_group_abort_session(slot.group, session);
+}
+
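+/* Wait predicate used by mali_gp_scheduler_suspend(): the scheduler counts as
+ * suspended once it is paused and the single GP slot is no longer working. */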
+static mali_bool mali_gp_scheduler_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_gp_scheduler_lock();
+ ret = pause_count > 0 && (slot.state == MALI_GP_SLOT_STATE_IDLE || slot.state == MALI_GP_SLOT_STATE_DISABLED);
+ mali_gp_scheduler_unlock();
+
+ return ret;
+}
+
+
+#if MALI_STATE_TRACKING
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue_high) ? "empty" : "not empty");
+
+ n += mali_group_dump_state(slot.group, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+void mali_gp_scheduler_reset_all_groups(void)
+{
+ if (NULL != slot.group) {
+ mali_group_lock(slot.group);
+ mali_group_reset(slot.group);
+ mali_group_unlock(slot.group);
+ }
+}
+
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ if (NULL != slot.group) {
+ mali_group_zap_session(slot.group, session);
+ }
+}
+
+void mali_gp_scheduler_enable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(slot.group == group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: enabling gp group %p\n", group));
+
+ mali_group_lock(group);
+
+ if (MALI_GROUP_STATE_DISABLED != group->state) {
+ mali_group_unlock(group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already enabled\n", group));
+ return;
+ }
+
+ mali_gp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ mali_group_power_on_group(group);
+ mali_group_reset(group);
+
+ /* Pick up any jobs that might have been queued while the GP group was disabled. */
+ mali_gp_scheduler_schedule_internal_and_unlock();
+}
+
+void mali_gp_scheduler_disable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(slot.group == group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: disabling gp group %p\n", group));
+
+ mali_gp_scheduler_suspend();
+ mali_group_lock(group);
+ mali_gp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ if (MALI_GROUP_STATE_DISABLED == group->state) {
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already disabled\n", group));
+ } else {
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state);
+ slot.state = MALI_GP_SLOT_STATE_DISABLED;
+ group->state = MALI_GROUP_STATE_DISABLED;
+
+ mali_group_power_off_group(group, MALI_TRUE);
+ }
+
+ mali_gp_scheduler_unlock();
+ mali_group_unlock(group);
+ mali_gp_scheduler_resume();
+}
+
+static mali_scheduler_mask mali_gp_scheduler_queue_job(struct mali_gp_job *job)
+{
+ _mali_osk_list_t *queue = NULL;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ struct mali_gp_job *iter, *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->session);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
+
+ job->cache_order = mali_scheduler_get_new_cache_order();
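+	/* The cache order is consumed by mali_l2_cache_invalidate_conditional()
+	 * when the job is started on a group, presumably so that the L2 cache is
+	 * only invalidated when it may still hold data from an older job. */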
+
+ /* Determine which queue the job should be added to. */
+ if (job->session->use_high_priority_job_queue) {
+ queue = &job_queue_high;
+ } else {
+ queue = &job_queue;
+ }
+
+ /* Find position in queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_gp_job, list) {
+ if (mali_gp_job_is_after(job, iter)) {
+ break;
+ }
+ }
+
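+	/* The reverse scan stopped at the newest queued job that this job is
+	 * ordered after; if no such job exists, 'iter' denotes the queue head
+	 * itself and the job is inserted at the front of the queue. */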
+ /* Add job to queue. */
+ _mali_osk_list_add(&job->list, &iter->list);
+
+ /* Set schedule bitmask if the GP core is idle. */
+ if (MALI_GP_SLOT_STATE_IDLE == slot.state) {
+ schedule_mask |= MALI_SCHEDULER_MASK_GP;
+ }
+
+ mali_gp_scheduler_job_queued();
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job), mali_gp_job_get_id(job), "GP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
+
+ return schedule_mask;
+}
+
+mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n", mali_gp_job_get_id(job), job));
+
+ mali_gp_scheduler_lock();
+
+ if (unlikely(job->session->is_aborting)) {
+		/* The is_aborting check above must be done while holding the scheduler lock. */
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) activated while session is aborting.\n", mali_gp_job_get_id(job), job));
+
+ /* This job should not be on any list. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+
+ mali_gp_scheduler_unlock();
+
+ /* Release tracker and delete job. */
+ mali_timeline_tracker_release(&job->tracker);
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+ mali_gp_job_delete(job);
+
+ /* Release the PM ref taken in mali_gp_scheduler_submit_job */
+ _mali_osk_pm_dev_ref_dec();
+
+ /* Since we are aborting we ignore the scheduler mask. */
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ /* GP job is ready to run, queue it. */
+ schedule_mask = mali_gp_scheduler_queue_job(job);
+
+ mali_gp_scheduler_unlock();
+
+ return schedule_mask;
+}
+
+static void mali_gp_scheduler_job_queued(void)
+{
+ if (mali_utilization_enabled()) {
+ /*
+		 * We cheat a little by counting the GP as busy from the time a GP job
+		 * is queued. This is fine because we only lose the tiny idle gap between
+		 * jobs, and in return there is less utilization work to do (fewer locks taken).
+ */
+ mali_utilization_gp_start();
+ }
+}
+
+static void mali_gp_scheduler_job_completed(void)
+{
+	/* Release the PM reference taken in mali_gp_scheduler_submit_job() */
+ _mali_osk_pm_dev_ref_dec();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_gp_end();
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_gp_scheduler.h b/drivers/gpu/mali/mali/common/mali_gp_scheduler.h
new file mode 100644
index 00000000..84a096c1
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_gp_scheduler.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_SCHEDULER_H__
+#define __MALI_GP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
+void mali_gp_scheduler_terminate(void);
+
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_gp_scheduler_suspend(void);
+void mali_gp_scheduler_resume(void);
+
+/**
+ * @brief Abort all running and queued GP jobs from session.
+ *
+ * This function aborts all GP jobs from the specified session. Queued jobs are removed from the
+ * queue and jobs currently running on a core will be aborted.
+ *
+ * @param session Session that is aborting.
+ */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the GP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ */
+void mali_gp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session);
+
+/**
+ * @brief Re-enable a group that has been disabled with mali_gp_scheduler_disable_group
+ *
+ * If a Mali PMU is present, the group will be powered back on and added back
+ * into the GP scheduler.
+ *
+ * @param group Pointer to the group to enable
+ */
+void mali_gp_scheduler_enable_group(struct mali_group *group);
+
+/**
+ * @brief Disable a group
+ *
+ * The group will be taken out of the GP scheduler and powered off, if a Mali
+ * PMU is present.
+ *
+ * @param group Pointer to the group to disable
+ */
+void mali_gp_scheduler_disable_group(struct mali_group *group);
+
+/**
+ * @brief Used by the Timeline system to queue a GP job.
+ *
+ * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
+ * call.
+ */
+mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job);
+
+/**
+ * @brief Schedule queued jobs on idle cores.
+ */
+void mali_gp_scheduler_schedule(void);
+
+/**
+ * @brief Submit a GP job to the GP scheduler.
+ *
+ * This will add the GP job to the Timeline system.
+ *
+ * @param session Session this job belongs to.
+ * @param job GP job that will be submitted
+ * @return Point on GP timeline for job.
+ */
+mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job);
+
+#endif /* __MALI_GP_SCHEDULER_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_group.c b/drivers/gpu/mali/mali/common/mali_group.c
new file mode 100644
index 00000000..aa49de07
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_group.c
@@ -0,0 +1,2049 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_scheduler.h"
+#include "mali_osk_profiling.h"
+#include "mali_pm_domain.h"
+#include "mali_pm.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+
+static void mali_group_timeout(void *data);
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/*
+ * The group object is the most important object in the device driver,
+ * and acts as the center of many HW operations.
+ * The reason for this is that operations on the MMU will affect all
+ * cores connected to this MMU (a group is defined by the MMU and the
+ * cores connected to it).
+ * The group lock is thus the most important lock, followed by the
+ * GP and PP scheduler locks. They must be taken in the following
+ * order:
+ * group lock(s) first, then the GP/PP scheduler lock.
+ */
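+/*
+ * For example, mali_gp_scheduler_schedule() in mali_gp_scheduler.c takes the
+ * group lock before taking the GP scheduler lock.
+ */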
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
+static u32 mali_global_num_groups = 0;
+
+/* timer related */
+int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
+
+/* local helper functions */
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
+
+static void mali_group_post_process_job_pp(struct mali_group *group);
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
+
+void mali_group_lock(struct mali_group *group)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_lock(group->lock);
+#else
+ _mali_osk_spinlock_lock(group->lock);
+#endif
+ MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
+}
+
+void mali_group_unlock(struct mali_group *group)
+{
+ MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_unlock(group->lock);
+#else
+ _mali_osk_spinlock_unlock(group->lock);
+#endif
+}
+
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+}
+#endif
+
+
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+{
+ struct mali_group *group = NULL;
+
+ if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
+ MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+ return NULL;
+ }
+
+ group = _mali_osk_calloc(1, sizeof(struct mali_group));
+ if (NULL != group) {
+ group->timeout_timer = _mali_osk_timer_init();
+
+ if (NULL != group->timeout_timer) {
+ _mali_osk_lock_order_t order;
+ _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+ if (NULL != dlbu) {
+ order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
+ } else {
+ order = _MALI_OSK_LOCK_ORDER_GROUP;
+ }
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
+#else
+ group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
+#endif
+
+ if (NULL != group->lock) {
+ group->l2_cache_core[0] = core;
+ group->session = NULL;
+ group->power_is_on = MALI_TRUE;
+ group->state = MALI_GROUP_STATE_IDLE;
+ _mali_osk_list_init(&group->group_list);
+ _mali_osk_list_init(&group->pp_scheduler_list);
+ group->parent_group = NULL;
+ group->l2_cache_core_ref_count[0] = 0;
+ group->l2_cache_core_ref_count[1] = 0;
+ group->bcast_core = bcast;
+ group->dlbu_core = dlbu;
+
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
+ }
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+ _mali_osk_free(group);
+ }
+
+ return NULL;
+}
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
+{
+ /* This group object now owns the MMU core object */
+ group->mmu = mmu_core;
+ group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+ if (NULL == group->bottom_half_work_mmu) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+ /* This group object no longer owns the MMU core object */
+ group->mmu = NULL;
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
+{
+ /* This group object now owns the GP core object */
+ group->gp_core = gp_core;
+ group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+ if (NULL == group->bottom_half_work_gp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the GP core object */
+ group->gp_core = NULL;
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
+{
+ /* This group object now owns the PP core object */
+ group->pp_core = pp_core;
+ group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+ if (NULL == group->bottom_half_work_pp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the PP core object */
+ group->pp_core = NULL;
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+}
+
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
+{
+ group->pm_domain = domain;
+}
+
+void mali_group_delete(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+ /* Delete the resources that this group owns */
+ if (NULL != group->gp_core) {
+ mali_gp_delete(group->gp_core);
+ }
+
+ if (NULL != group->pp_core) {
+ mali_pp_delete(group->pp_core);
+ }
+
+ if (NULL != group->mmu) {
+ mali_mmu_delete(group->mmu);
+ }
+
+ if (mali_group_is_virtual(group)) {
+ /* Remove all groups from virtual group */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ child->parent_group = NULL;
+ mali_group_delete(child);
+ }
+
+ mali_dlbu_delete(group->dlbu_core);
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_unit_delete(group->bcast_core);
+ }
+ }
+
+ for (i = 0; i < mali_global_num_groups; i++) {
+ if (mali_global_groups[i] == group) {
+ mali_global_groups[i] = NULL;
+ mali_global_num_groups--;
+
+ if (i != mali_global_num_groups) {
+ /* We removed a group from the middle of the array -- move the last
+ * group to the current position to close the gap */
+ mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+ mali_global_groups[mali_global_num_groups] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ if (NULL != group->timeout_timer) {
+ _mali_osk_timer_del(group->timeout_timer);
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_term(group->lock);
+#else
+ _mali_osk_spinlock_term(group->lock);
+#endif
+ _mali_osk_free(group);
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+ u32 i;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+ i = 0;
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+ MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
+ i++;
+ }
+})
+
+/**
+ * @brief Add child group to virtual group parent
+ *
+ * Before calling this function, child must have its state set to JOINING_VIRTUAL
+ * to ensure it's not touched during the transition period. When this function returns,
+ * child's state will be IN_VIRTUAL.
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
+{
+ mali_bool found;
+ u32 i;
+ struct mali_session_data *child_session;
+
+ MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(NULL == child->parent_group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
+
+ _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+ child->state = MALI_GROUP_STATE_IN_VIRTUAL;
+ child->parent_group = parent;
+
+ MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+ MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+ MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+ /* Keep track of the L2 cache cores of child groups */
+ found = MALI_FALSE;
+ for (i = 0; i < 2; i++) {
+ if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
+ MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+ parent->l2_cache_core_ref_count[i]++;
+ found = MALI_TRUE;
+ }
+ }
+
+ if (!found) {
+ /* First time we see this L2 cache, add it to our list */
+ i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+ MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core[i] = child->l2_cache_core[0];
+ parent->l2_cache_core_ref_count[i]++;
+ }
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_add_group(parent->bcast_core, child);
+ mali_dlbu_add_group(parent->dlbu_core, child);
+
+ child_session = child->session;
+ child->session = NULL;
+
+ /* Above this comment, only software state is updated and the HW is not
+ * touched. Now, check if Mali is powered and skip the rest if it isn't
+ * powered.
+ */
+
+ if (!update_hw) {
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+ return;
+ }
+
+ /* Update MMU */
+ if (parent->session == child_session) {
+ mali_mmu_zap_tlb(child->mmu);
+ } else {
+ if (NULL == parent->session) {
+ mali_mmu_activate_empty_page_directory(child->mmu);
+ } else {
+ mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ }
+ }
+
+ mali_dlbu_update_mask(parent->dlbu_core);
+
+ /* Start job on child when parent is active */
+ if (NULL != parent->pp_running_job) {
+ struct mali_pp_job *job = parent->pp_running_job;
+		u32 subjob = (u32)-1;
+
+ if (mali_pp_job_is_with_dlbu(parent->pp_running_job)) {
+ subjob = mali_pp_core_get_id(child->pp_core);
+ }
+
+		/* Taking the next unstarted sub job directly, without the scheduler
+		 * lock, should be safe here because: 1) the virtual group is the only
+		 * consumer of this job, and 2) taking the next unstarted sub job does
+		 * not modify the job queue itself.
+		 */
+ if (mali_pp_job_has_unstarted_sub_jobs(job)) {
+ subjob = mali_pp_job_get_first_unstarted_sub_job(job);
+ mali_pp_job_mark_sub_job_started(job, subjob);
+ }
+
+		if ((u32)-1 != subjob) {
+ MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+ child, mali_pp_job_get_id(job), parent));
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
+ /* Reset broadcast unit only when it will help run subjob */
+ mali_bcast_reset(parent->bcast_core);
+
+ mali_group_start_job_on_group(child, job, subjob);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ *
+ * After the child is removed, its state will be LEAVING_VIRTUAL and must be set
+ * to IDLE before it can be used.
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+ u32 i;
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(parent == child->parent_group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
+ /* Removing groups while running is not yet supported. */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
+
+ mali_group_lock(child);
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_remove_group(parent->bcast_core, child);
+ mali_dlbu_remove_group(parent->dlbu_core, child);
+
+ /* Update HW only if power is on */
+ if (mali_pm_is_power_on()) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+ }
+
+ _mali_osk_list_delinit(&child->group_list);
+
+ child->session = parent->session;
+ child->parent_group = NULL;
+ child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+
+ /* Keep track of the L2 cache cores of child groups */
+ i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core_ref_count[i]--;
+
+ if (parent->l2_cache_core_ref_count[i] == 0) {
+ parent->l2_cache_core[i] = NULL;
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+
+ mali_group_unlock(child);
+}
+
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+ struct mali_group *child;
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
+
+ child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+
+ mali_group_remove_group(parent, child);
+
+ return child;
+}
+
+void mali_group_reset(struct mali_group *group)
+{
+ /*
+ * This function should not be used to abort jobs,
+ * currently only called during insmod and PM resume
+ */
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+
+ group->session = NULL;
+
+ if (NULL != group->dlbu_core) {
+ mali_dlbu_reset(group->dlbu_core);
+ }
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_reset(group->bcast_core);
+ }
+
+ if (NULL != group->mmu) {
+ mali_group_reset_mmu(group);
+ }
+
+ if (NULL != group->gp_core) {
+ mali_gp_reset(group->gp_core);
+ }
+
+ if (NULL != group->pp_core) {
+ mali_group_reset_pp(group);
+ }
+}
+
+/* This function is called before running a job on a virtual group.
+ * It removes any child groups not needed for this run from the broadcast
+ * mask and sets the per-core registers of each child group.
+ */
+static void mali_group_job_prepare_virtual(struct mali_group *group, struct mali_pp_job *job,
+ u32 first_subjob, u32 last_subjob)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+ u32 subjob = first_subjob;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
+
+	/* Set the core-specific registers:
+	 * 1. Renderer List Address
+	 * 2. Fragment Shader Stack Address
+	 * All other general registers are set through the Broadcast Unit in one go.
+	 * Note: any child core that is temporarily unused in this virtual group
+	 * must be removed from the Broadcast Unit before the job is started,
+	 * otherwise we may never get the Frame_end interrupt.
+ */
+ if (!mali_pp_job_is_with_dlbu(job)) {
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ if (subjob <= last_subjob) {
+ /* Write specific Renderer List Address for each group */
+ mali_pp_write_addr_renderer_list(child->pp_core, job, subjob);
+ /* Write specific stack address for each child group */
+ mali_pp_write_addr_stack(child->pp_core, job, subjob);
+ subjob++;
+ MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual group job %u (0x%08X) part %u/%u started.\n",
+ mali_pp_job_get_id(job), job, subjob,
+ mali_pp_job_get_sub_job_count(job)));
+ } else {
+				/* This physical group is redundant for this run,
+				 * so remove it from the broadcast mask.
+				 */
+ mali_bcast_remove_group(group->bcast_core, child);
+				MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Redundant PP group %p removed from bcast_core\n", child));
+ }
+ }
+
+ /* Reset broadcast */
+ mali_bcast_reset(group->bcast_core);
+ } else {
+ /* Write stack address for each child group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_write_addr_stack(child->pp_core, job, child->pp_core->core_id);
+ mali_bcast_add_group(group->bcast_core, child);
+ }
+
+ /* Reset broadcast */
+ mali_bcast_reset(group->bcast_core);
+
+ mali_dlbu_config_job(group->dlbu_core, job);
+
+ /* Write Renderer List Address for each child group */
+ mali_pp_write_addr_renderer_list(group->pp_core, job, 0);
+
+ MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual job %u (0x%08X) part %u/%u started (from schedule).\n",
+ mali_pp_job_get_id(job), job, 1,
+ mali_pp_job_get_sub_job_count(job)));
+ }
+}
+
+/* Call this function to make sure group->group_list is consistent with the group's bcast_core mask */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group)
+{
+ struct mali_group *child, *temp;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list, struct mali_group, group_list) {
+ mali_bcast_add_group(group->bcast_core, child);
+ }
+
+	MALI_DEBUG_PRINT(3, ("Mali group: New physical groups added to virtual group at non-DLBU job done\n"));
+	/**
+	 * Physical groups newly added to the virtual group may have a different
+	 * page directory than the virtual group. Activate the empty page directory
+	 * for the virtual group to avoid a potentially inconsistent page directory.
+ */
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+}
+
+struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+ return group->gp_core;
+}
+
+struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+ return group->pp_core;
+}
+
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+ session = mali_gp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
+ }
+
+ mali_group_activate_page_directory(group, session);
+
+ mali_gp_job_start(group->gp_core, job);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
+ mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
+#endif
+
+ group->gp_running_job = job;
+ group->state = MALI_GROUP_STATE_WORKING;
+
+ /* Setup the timeout timer value and save the job id for the job running on the gp core */
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+}
+
+/* Sets all the registers except the renderer list address and the fragment shader
+ * stack address; the caller must set those two registers before calling this function.
+ */
+static void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+{
+ struct mali_session_data *session;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+ session = mali_pp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
+ }
+
+ if (NULL != group->l2_cache_core[1]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
+ }
+
+ mali_group_activate_page_directory(group, session);
+
+ if (mali_group_is_virtual(group)) {
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+
+ /* Try to use DMA unit to start job, fallback to writing directly to the core */
+ MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
+ if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
+ mali_pp_job_start(group->pp_core, job, sub_job);
+ }
+ } else {
+ mali_pp_job_start(group->pp_core, job, sub_job);
+ }
+
+ /* if the group is virtual, loop through physical groups which belong to this group
+ * and call profiling events for its cores as virtual */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+#if defined(CONFIG_MALI400_PROFILING)
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ } else { /* group is physical - call profiling events for physical cores */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ }
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
+#endif
+ group->pp_running_job = job;
+ group->pp_running_sub_job = sub_job;
+ group->state = MALI_GROUP_STATE_WORKING;
+
+	/* Set up the timeout timer for the job running on the PP core */
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+}
+
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job,
+ u32 first_subjob, u32 last_subjob)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
+
+ /* Prepare the group for running this job */
+ mali_group_job_prepare_virtual(group, job, first_subjob, last_subjob);
+
+	/* Start the job. General setup common to all the PP cores */
+ mali_group_start_pp_job(group, job, first_subjob);
+}
+
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state || MALI_GROUP_STATE_IN_VIRTUAL == group->state);
+
+ /*
+ * There are two frame registers which are different for each sub job:
+ * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+ * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+ */
+ mali_pp_write_addr_renderer_list(group->pp_core, job, subjob);
+
+ /* Write specific stack address for each child group */
+ mali_pp_write_addr_stack(group->pp_core, job, subjob);
+
+	/* To start a job in a group that is just joining the virtual group,
+	 * start the job directly; all the accounting information and state
+	 * updates are already covered by the virtual group's state
+	 */
+ if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
+ mali_pp_job_start(group->pp_core, job, subjob);
+ return;
+ }
+
+	/* Start the job. General setup common to all the PP cores */
+ mali_group_start_pp_job(group, job, subjob);
+}
+
+
+
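+/* Resume a GP job that was suspended waiting for more heap memory (the group
+ * must be in MALI_GROUP_STATE_OOM). On success the L2 cache is invalidated,
+ * the MMU TLB is zapped and the GP core is resumed with the new heap bounds;
+ * returns the running job, or NULL if the request is stale (wrong job id or
+ * the job has already been aborted). */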
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (group->state != MALI_GROUP_STATE_OOM ||
+ mali_gp_job_get_id(group->gp_running_job) != job_id) {
+ return NULL; /* Illegal request or job has already been aborted */
+ }
+
+ if (NULL != group->l2_cache_core[0]) {
+ mali_l2_cache_invalidate(group->l2_cache_core[0]);
+ }
+
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+
+ mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+ group->state = MALI_GROUP_STATE_WORKING;
+
+ return group->gp_running_job;
+}
+
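+/* Reset the MMU(s) belonging to a group. For a virtual group, if resetting the
+ * group's own MMU fails, each member group's MMU is reset individually and we
+ * wait until all of them are done resetting. */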
+static void mali_group_reset_mmu(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+ _mali_osk_errcode_t err;
+
+ if (!mali_group_is_virtual(group)) {
+		/* This is a physical group -- simply wait for the reset to complete. */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ } else { /* virtual group */
+ err = mali_mmu_reset(group->mmu);
+ if (_MALI_OSK_ERR_OK == err) {
+ return;
+ }
+
+ /* Loop through all members of this virtual group and wait
+ * until they are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ err = mali_mmu_reset(child->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ }
+ }
+}
+
+static void mali_group_reset_pp(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ mali_pp_reset_async(group->pp_core);
+
+ if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
+ /* This is a physical group or an idle virtual group -- simply wait for
+ * the reset to complete. */
+ mali_pp_reset_wait(group->pp_core);
+ } else { /* virtual group */
+ /* Loop through all members of this virtual group and wait until they
+ * are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_reset_wait(child->pp_core);
+ }
+ }
+}
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
+{
+ struct mali_pp_job *pp_job_to_return;
+ u32 pp_sub_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ mali_group_post_process_job_pp(group);
+
+ if (success) {
+ /* Only do soft reset for successful jobs, a full recovery
+ * reset will be done for failed jobs. */
+ mali_pp_reset_async(group->pp_core);
+ }
+
+ pp_job_to_return = group->pp_running_job;
+ pp_sub_job_to_return = group->pp_running_sub_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->pp_running_job = NULL;
+
+ if (!success) {
+ MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+ mali_group_recovery_reset(group);
+ } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
+ MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+ mali_group_recovery_reset(group);
+ }
+
+ /* Return job to user, schedule and unlock group. */
+ mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
+}
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
+{
+ struct mali_gp_job *gp_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ mali_group_post_process_job_gp(group, MALI_FALSE);
+
+ if (success) {
+ /* Only do soft reset for successful jobs, a full recovery
+ * reset will be done for failed jobs. */
+ mali_gp_reset_async(group->gp_core);
+ }
+
+ gp_job_to_return = group->gp_running_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->gp_running_job = NULL;
+
+ if (!success) {
+ MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+ mali_group_recovery_reset(group);
+ } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
+ MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+ mali_group_recovery_reset(group);
+ }
+
+ /* Return job to user, schedule and unlock group. */
+ mali_gp_scheduler_job_done(group, gp_job_to_return, success);
+}
+
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (MALI_GROUP_STATE_IDLE == group->state ||
+ mali_gp_job_get_id(group->gp_running_job) != job_id) {
+ return; /* No need to cancel or job has already been aborted or completed */
+ }
+
+ /* Function will unlock the group, so we need to lock it again */
+ mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+ mali_group_lock(group);
+}
+
+static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (MALI_GROUP_STATE_IDLE == group->state ||
+ mali_pp_job_get_id(group->pp_running_job) != job_id) {
+ return; /* No need to cancel or job has already been aborted or completed */
+ }
+
+ mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+ mali_group_lock(group);
+}
+
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
+{
+ struct mali_gp_job *gp_job;
+ struct mali_pp_job *pp_job;
+ u32 gp_job_id = 0;
+ u32 pp_job_id = 0;
+ mali_bool abort_pp = MALI_FALSE;
+ mali_bool abort_gp = MALI_FALSE;
+
+ mali_group_lock(group);
+
+ if (mali_group_is_in_virtual(group)) {
+ /* Group is member of a virtual group, don't touch it! */
+ mali_group_unlock(group);
+ return;
+ }
+
+ gp_job = group->gp_running_job;
+ pp_job = group->pp_running_job;
+
+ if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
+ MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
+
+ gp_job_id = mali_gp_job_get_id(gp_job);
+ abort_gp = MALI_TRUE;
+ }
+
+ if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
+ MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
+
+ pp_job_id = mali_pp_job_get_id(pp_job);
+ abort_pp = MALI_TRUE;
+ }
+
+ if (abort_gp) {
+ mali_group_abort_gp_job(group, gp_job_id);
+ }
+ if (abort_pp) {
+ mali_group_abort_pp_job(group, pp_job_id);
+ }
+
+ mali_group_remove_session_if_unused(group, session);
+
+ mali_group_unlock(group);
+}
+
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+ if (mali_global_num_groups > index) {
+ return mali_global_groups[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_group_get_glob_num_groups(void)
+{
+ return mali_global_num_groups;
+}
+
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+
+ if (group->session != session) {
+ /* Different session than last time, so we need to do some work */
+ MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
+ mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+ group->session = session;
+ } else {
+ /* Same session as last time, so no work required */
+ MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+ }
+}
+
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (MALI_GROUP_STATE_IDLE == group->state) {
+ if (group->session == session) {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+ }
+ }
+}
+
+mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ return group->power_is_on;
+}
+
+void mali_group_power_on_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_IN_VIRTUAL == group->state
+ || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));
+
+ group->power_is_on = MALI_TRUE;
+}
+
+void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_IN_VIRTUAL == group->state
+ || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));
+
+	/* It is necessary to set group->session = NULL so that the powered-off MMU is not written
+	 * to on map/unmap. It is also necessary to set group->power_is_on = MALI_FALSE so that
+	 * pending bottom halves do not access powered-off cores. */
+
+ group->session = NULL;
+
+ if (do_power_change) {
+ group->power_is_on = MALI_FALSE;
+ }
+}
+
+void mali_group_power_on(void)
+{
+ int i;
+ for (i = 0; i < mali_global_num_groups; i++) {
+ struct mali_group *group = mali_global_groups[i];
+
+ mali_group_lock(group);
+ if (MALI_GROUP_STATE_DISABLED == group->state) {
+ MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+ } else {
+ mali_group_power_on_group(group);
+ }
+ mali_group_unlock(group);
+ }
+ MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
+}
+
+void mali_group_power_off(mali_bool do_power_change)
+{
+ int i;
+
+ for (i = 0; i < mali_global_num_groups; i++) {
+ struct mali_group *group = mali_global_groups[i];
+
+ mali_group_lock(group);
+ if (MALI_GROUP_STATE_DISABLED == group->state) {
+ MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+ } else {
+ mali_group_power_off_group(group, do_power_change);
+ }
+ mali_group_unlock(group);
+ }
+ MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
+}
+
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ /* Stop cores, bus stop */
+ if (NULL != group->pp_core) {
+ mali_pp_stop_bus(group->pp_core);
+ } else {
+ mali_gp_stop_bus(group->gp_core);
+ }
+
+ /* Flush MMU and clear page fault (if any) */
+ mali_mmu_activate_fault_flush_page_directory(group->mmu);
+ mali_mmu_page_fault_done(group->mmu);
+
+ /* Wait for cores to stop bus, then do a hard reset on them */
+ if (NULL != group->pp_core) {
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child, *temp;
+
+ /* Disable the broadcast unit while we do reset directly on the member cores. */
+ mali_bcast_disable(group->bcast_core);
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_stop_bus_wait(child->pp_core);
+ mali_pp_hard_reset(child->pp_core);
+ }
+
+ mali_bcast_enable(group->bcast_core);
+ } else {
+ mali_pp_stop_bus_wait(group->pp_core);
+ mali_pp_hard_reset(group->pp_core);
+ }
+ } else {
+ mali_gp_stop_bus_wait(group->gp_core);
+ mali_gp_hard_reset(group->gp_core);
+ }
+
+ /* Reset MMU */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ MALI_IGNORE(err);
+
+ group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
+ if (group->gp_core) {
+ n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
+ }
+ if (group->pp_core) {
+ n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
+ group->pp_running_job, group->pp_running_sub_job);
+ }
+
+ return n;
+}
+#endif
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (NULL != group->pp_core) {
+ struct mali_pp_job *pp_job_to_return;
+ u32 pp_sub_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+
+ mali_group_post_process_job_pp(group);
+
+ pp_job_to_return = group->pp_running_job;
+ pp_sub_job_to_return = group->pp_running_sub_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->pp_running_job = NULL;
+
+ mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+ /* Will unlock group. */
+ mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
+ } else {
+ struct mali_gp_job *gp_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+
+ mali_group_post_process_job_gp(group, MALI_FALSE);
+
+ gp_job_to_return = group->gp_running_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->gp_running_job = NULL;
+
+ mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+ /* Will unlock group. */
+ mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+ }
+}
+
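+/* MMU interrupt upper half. Returns _MALI_OSK_ERR_OK if this MMU raised the
+ * interrupt (a bottom half is then scheduled on the group, or on its parent
+ * if it is a member of a virtual group), and _MALI_OSK_ERR_FAULT otherwise so
+ * that a shared IRQ line can be passed on to the next handler. */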
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_mmu_core *mmu = group->mmu;
+ u32 int_stat;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+ goto out;
+ }
+#endif
+
+ /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+ int_stat = mali_mmu_get_int_status(mmu);
+ if (0 != int_stat) {
+ struct mali_group *parent = group->parent_group;
+
+		/* Page fault or bus error; we treat them both the same way */
+ mali_mmu_mask_all_interrupts(mmu);
+ if (NULL == parent) {
+ _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+ } else {
+ _mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
+ }
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+ return err;
+}
+
+static void mali_group_bottom_half_mmu(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_mmu_core *mmu = group->mmu;
+ u32 rawstat;
+ MALI_DEBUG_CODE(u32 status);
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ mali_group_lock(group);
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+ if (MALI_FALSE == mali_group_power_is_on(group)) {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
+ mali_group_unlock(group);
+ return;
+ }
+
+ rawstat = mali_mmu_get_rawstat(mmu);
+ MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
+
+ MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
+
+ if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+ /* An actual page fault has occurred. */
+#ifdef DEBUG
+ u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%08x from bus id %d of type %s on %s\n",
+ fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ mmu->hw_core.description));
+ mali_mmu_pagedir_diag(group->session->page_directory, fault_address);
+#endif
+
+ mali_group_mmu_page_fault_and_unlock(group);
+ return;
+ }
+
+ mali_group_unlock(group);
+}
+
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_gp_core *core = group->gp_core;
+ u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+ goto out;
+ }
+#endif
+
+ irq_readout = mali_gp_get_int_stat(core);
+
+ if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_gp_mask_all_interrupts(core);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+
+ /* We do need to handle this in a bottom half */
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+ return err;
+}
+
+static void mali_group_bottom_half_gp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ u32 irq_readout;
+ u32 irq_errors;
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
+
+ mali_group_lock(group);
+
+ if (MALI_FALSE == mali_group_power_is_on(group)) {
+ MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ irq_readout = mali_gp_read_rawstat(group->gp_core);
+
+ MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+
+ if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
+ u32 core_status = mali_gp_read_core_status(group->gp_core);
+ if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
+ MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
+ group->core_timed_out = MALI_FALSE;
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+
+ mali_group_complete_gp_and_unlock(group, MALI_TRUE);
+ return;
+ }
+ }
+
+	/*
+	 * Now let's look at the possible error cases (IRQ indicating error or timeout).
+	 * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered errors.
+	 */
+ irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_HANG | MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
+ if (0 != irq_errors) {
+ MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+ group->core_timed_out = MALI_FALSE;
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+
+ mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+ return;
+ } else if (group->core_timed_out) { /* SW timeout */
+ group->core_timed_out = MALI_FALSE;
+ if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
+ MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
+
+ mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+ return;
+ }
+ } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+ /* GP wants more memory in order to continue. */
+ MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
+
+ group->state = MALI_GROUP_STATE_OOM;
+ mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
+ mali_gp_scheduler_oom(group, group->gp_running_job);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+	/*
+	 * The only way to get here is if we received only one of the two needed
+	 * END_CMD_LST interrupts. Re-enable all interrupts except the completion
+	 * interrupt that has already been received, and continue running.
+	 */
+ mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
+{
+ /* Stop the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL == group->gp_running_job) {
+ /* Nothing to do */
+ return;
+ }
+
+ mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if (suspend) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+ }
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+ mali_gp_job_set_current_heap_addr(group->gp_running_job,
+ mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+}
+
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_pp_core *core = group->pp_core;
+ u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+ goto out;
+ }
+#endif
+
+	/*
+	 * For Mali-450 there is one particular case we need to watch out for:
+	 *
+	 * Criterion 1) this function call can be due to a shared interrupt,
+	 * and not necessarily because this core signaled an interrupt.
+	 * Criterion 2) this core is a part of a virtual group, and thus it should
+	 * not do any post processing.
+	 * Criterion 3) this core has actually indicated that it has completed by
+	 * setting the raw_stat/int_stat registers to != 0.
+	 *
+	 * If all of these criteria are met, we could incorrectly start post
+	 * processing on the wrong group object (this should only happen on the
+	 * parent group).
+	 */
+#if !defined(MALI_UPPER_HALF_SCHEDULING)
+ if (mali_group_is_in_virtual(group)) {
+ /*
+ * This check is done without the group lock held, which could lead to
+ * a potential race. This is however ok, since we will safely re-check
+ * this with the group lock held at a later stage. This is just an
+ * early out which will strongly benefit shared IRQ systems.
+ */
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+#endif
+
+ irq_readout = mali_pp_get_int_stat(core);
+ if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_pp_mask_all_interrupts(core);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Currently no support for this interrupt event for the virtual PP core */
+ if (!mali_group_is_virtual(group)) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
+ irq_readout, 0, 0, 0, 0);
+ }
+#endif
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ /* Check if job is complete without errors */
+ if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
+
+ mali_group_lock(group);
+
+ /* Check if job is complete without errors, again, after taking the group lock */
+ irq_readout = mali_pp_read_rawstat(core);
+ if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+ if (mali_group_is_virtual(group)) {
+ u32 status_readout = mali_pp_read_status(group->pp_core);
+ if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
+ MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+ }
+
+ if (mali_group_is_in_virtual(group)) {
+ /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+ err = _MALI_OSK_ERR_FAULT;
+ goto out;
+ }
+
+ group->core_timed_out = MALI_FALSE;
+
+ mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);
+
+ /* No need to enable interrupts again, since the core will be reset while completing the job */
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+#endif
+
+ /* We do need to handle this in a bottom half */
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+ return err;
+}
+
+static void mali_group_bottom_half_pp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_pp_core *core = group->pp_core;
+ u32 irq_readout;
+ u32 irq_errors;
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+ mali_group_lock(group);
+
+ if (mali_group_is_in_virtual(group)) {
+ /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ if (MALI_FALSE == mali_group_power_is_on(group)) {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ irq_readout = mali_pp_read_rawstat(group->pp_core);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+
+ /* Check if job is complete without errors */
+ if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
+ if (mali_group_is_virtual(group)) {
+ u32 status_readout = mali_pp_read_status(group->pp_core);
+
+ if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE && !group->core_timed_out) {
+ MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+ }
+
+ if (!group->core_timed_out) {
+ MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
+ group->core_timed_out = MALI_FALSE;
+
+ mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+ }
+
+	/*
+	 * Now let's look at the possible error cases (IRQ indicating error or timeout).
+	 * END_OF_FRAME and HANG interrupts are not considered errors.
+	 */
+ irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME | MALI200_REG_VAL_IRQ_HANG);
+ if (0 != irq_errors) {
+ MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
+ irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+ group->core_timed_out = MALI_FALSE;
+
+ mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ } else if (group->core_timed_out) { /* SW timeout */
+ group->core_timed_out = MALI_FALSE;
+ if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
+ MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
+ mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
+
+ mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+ } else {
+ mali_group_unlock(group);
+ }
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+	/*
+	 * We should never get here; re-enable interrupts and continue.
+	 */
+ if (0 == irq_readout) {
+ MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
+ mali_pp_get_hw_core_desc(group->pp_core)));
+ } else {
+ MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
+ mali_pp_get_hw_core_desc(group->pp_core)));
+ }
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+static void mali_group_post_process_job_pp(struct mali_group *group)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ /* Stop the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->pp_running_job) {
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /* update performance counters from each physical pp core within this virtual group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* send profiling data per physical core */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+ }
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+
+#endif
+ } else {
+ /* update performance counters for a physical group's pp core */
+ mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job), mali_pp_job_get_flush_id(group->pp_running_job));
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif
+ }
+ }
+}
+
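+/* Timeout timer callback: flag the core as timed out and defer the actual
+ * handling to the GP or PP bottom half. */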
+static void mali_group_timeout(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ group->core_timed_out = MALI_TRUE;
+
+ if (NULL != group->gp_core) {
+ MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+ }
+}
+
+void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ /* Early out - safe even if mutex is not held */
+ if (group->session != session) return;
+
+ mali_group_lock(group);
+
+ mali_group_remove_session_if_unused(group, session);
+
+ if (group->session == session) {
+ /* The Zap also does the stall and disable_stall */
+ mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+ if (MALI_TRUE != zap_success) {
+ MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
+
+ mali_group_mmu_page_fault_and_unlock(group);
+ return;
+ }
+ }
+
+ mali_group_unlock(group);
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
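+/* Report both counter values of the L2 cache identified by core_num on its
+ * per-cache profiling channel. */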
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+ u32 source0 = 0;
+ u32 value0 = 0;
+ u32 source1 = 0;
+ u32 value1 = 0;
+ u32 profiling_channel = 0;
+
+ switch (core_num) {
+ case 0:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ case 1:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+ break;
+ case 2:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+ break;
+ default:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ }
+
+ if (0 == core_num) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ }
+ if (1 == core_num) {
+ if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+ if (2 == core_num) {
+ if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+
+ _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+mali_bool mali_group_is_enabled(struct mali_group *group)
+{
+ mali_bool enabled = MALI_TRUE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_group_lock(group);
+ if (MALI_GROUP_STATE_DISABLED == group->state) {
+ enabled = MALI_FALSE;
+ }
+ mali_group_unlock(group);
+
+ return enabled;
+}
+
+void mali_group_enable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
+ || NULL != mali_group_get_gp_core(group));
+
+ if (NULL != mali_group_get_pp_core(group)) {
+ mali_pp_scheduler_enable_group(group);
+ } else {
+ mali_gp_scheduler_enable_group(group);
+ }
+}
+
+void mali_group_disable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
+ || NULL != mali_group_get_gp_core(group));
+
+ if (NULL != mali_group_get_pp_core(group)) {
+ mali_pp_scheduler_disable_group(group);
+ } else {
+ mali_gp_scheduler_disable_group(group);
+ }
+}
+
+static struct mali_pm_domain *mali_group_get_l2_domain(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+
+ /* l2_cache_core[0] stores the related l2 domain */
+ return group->l2_cache_core[0]->pm_domain;
+}
+
+void mali_group_get_pm_domain_ref(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ /* Get group used l2 domain ref */
+ mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
+ /* Get group used core domain ref */
+ mali_pm_domain_ref_get(group->pm_domain);
+}
+
+void mali_group_put_pm_domain_ref(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ /* Put group used core domain ref */
+ mali_pm_domain_ref_put(group->pm_domain);
+ /* Put group used l2 domain ref */
+ mali_pm_domain_ref_put(mali_group_get_l2_domain(group));
+}
diff --git a/drivers/gpu/mali/mali/common/mali_group.h b/drivers/gpu/mali/mali/common/mali_group.h
new file mode 100644
index 00000000..c7301f25
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_group.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "linux/jiffies.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+
+/**
+ * @brief Default max runtime [ms] for a core job - used by timeout timers
+ */
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 4000
+
+/** @brief A Mali group object represents an MMU and a PP and/or a GP core.
+ */
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+
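+/* Note: the JOINING_VIRTUAL and LEAVING_VIRTUAL states mark the transition
+ * in to / out of a virtual group; mali_group_is_in_virtual() below treats all
+ * three virtual-related states as being "part of" a virtual group. */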
+enum mali_group_core_state {
+ MALI_GROUP_STATE_IDLE,
+ MALI_GROUP_STATE_WORKING,
+ MALI_GROUP_STATE_OOM,
+ MALI_GROUP_STATE_IN_VIRTUAL,
+ MALI_GROUP_STATE_JOINING_VIRTUAL,
+ MALI_GROUP_STATE_LEAVING_VIRTUAL,
+ MALI_GROUP_STATE_DISABLED,
+};
+
+/* Forward declaration from mali_pm_domain.h */
+struct mali_pm_domain;
+
+/**
+ * The structure represents a render group
+ * A render group is defined by all the cores that share the same Mali MMU
+ */
+
+struct mali_group {
+ struct mali_mmu_core *mmu;
+ struct mali_session_data *session;
+
+ mali_bool power_is_on;
+ enum mali_group_core_state state;
+
+ struct mali_gp_core *gp_core;
+ struct mali_gp_job *gp_running_job;
+
+ struct mali_pp_core *pp_core;
+ struct mali_pp_job *pp_running_job;
+ u32 pp_running_sub_job;
+
+ struct mali_l2_cache_core *l2_cache_core[2];
+ u32 l2_cache_core_ref_count[2];
+
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_t *lock;
+#else
+ _mali_osk_spinlock_t *lock;
+#endif
+
+ _mali_osk_list_t pp_scheduler_list;
+
+	/* List used for virtual groups. For a virtual group this is the list head;
+	 * for a member group it is the element linking it into the parent's list. */
+ _mali_osk_list_t group_list;
+
+ struct mali_group *pm_domain_list;
+ struct mali_pm_domain *pm_domain;
+
+ /* Parent virtual group (if any) */
+ struct mali_group *parent_group;
+
+ _mali_osk_wq_work_t *bottom_half_work_mmu;
+ _mali_osk_wq_work_t *bottom_half_work_gp;
+ _mali_osk_wq_work_t *bottom_half_work_pp;
+
+ _mali_osk_timer_t *timeout_timer;
+ mali_bool core_timed_out;
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @param core Pointer to the L2 cache core used by the group
+ * @param dlbu Pointer to the DLBU core (only set for virtual groups)
+ * @param bcast Pointer to the broadcast unit (only set for virtual groups)
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain);
+
+void mali_group_delete(struct mali_group *group);
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+#if defined(CONFIG_MALI450)
+ return (NULL != group->dlbu_core);
+#else
+ return MALI_FALSE;
+#endif
+}
+
+/** @brief Check if a group is considered as part of a virtual group
+ *
+ * @note A group is considered to be "part of" a virtual group also during the transition
+ * in to / out of the virtual group.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+#if defined(CONFIG_MALI450)
+ return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
+ MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
+ MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+#else
+ return MALI_FALSE;
+#endif
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group, including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+/** @brief Zap MMU TLB on all groups
+ *
+ * Zap TLB on group if \a session is active.
+ */
+void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group);
+
+/** @brief Get pointer to PP core object
+ */
+struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group);
+
+/** @brief Lock group object
+ *
+ * Most group functions will lock the group object themselves. The exception is
+ * the group_bottom_half, which requires the group to be locked on entry.
+ *
+ * @param group Pointer to group to lock
+ */
+void mali_group_lock(struct mali_group *group);
+
+/** @brief Unlock group object
+ *
+ * @param group Pointer to group to unlock
+ */
+void mali_group_unlock(struct mali_group *group);
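+
+/* A minimal locking sketch (illustrative only; mali_group_abort_session() in
+ * mali_group.c follows this pattern):
+ *
+ *	mali_group_lock(group);
+ *	if (mali_group_is_in_virtual(group)) {
+ *		mali_group_unlock(group);   members of virtual groups are left alone
+ *		return;
+ *	}
+ *	...inspect or update group state...
+ *	mali_group_unlock(group);
+ */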
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group);
+#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
+#else
+#define MALI_ASSERT_GROUP_LOCKED(group)
+#endif
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+
+/** @brief Start a virtual group job on a virtual group
+*/
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
+
+
+/** @brief Start a particular sub job on a specific PP group
+*/
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob);
+
+
+/** @brief Remove all unused groups from the tmp_unused group list, so that the group is left in a consistent state.
+ */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group);
+
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+/** @brief Abort GP job
+ *
+ * Used to abort suspended OOM jobs when user space failed to allocate more memory.
+ */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
+/** @brief Abort all GP jobs from \a session
+ *
+ * Used on session close when terminating all running and queued jobs from \a session.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+
+mali_bool mali_group_power_is_on(struct mali_group *group);
+void mali_group_power_on_group(struct mali_group *group);
+void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change);
+void mali_group_power_on(void);
+
+/** @brief Prepare group for power off
+ *
+ * Update the group's state and prepare for the group to be powered off.
+ *
+ * If do_power_change is MALI_FALSE, the group's session will be set to NULL so
+ * that no more activity will happen to this group, but the power state flag
+ * will be left unchanged.
+ *
+ * @param do_power_change MALI_TRUE if the power status is to be updated
+ */
+void mali_group_power_off(mali_bool do_power_change);
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+/* MMU-related functions */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+
+/* GP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+
+/* PP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+/** @brief Check if group is enabled
+ *
+ * @param group group to check
+ * @return MALI_TRUE if enabled, MALI_FALSE if not
+ */
+mali_bool mali_group_is_enabled(struct mali_group *group);
+
+/** @brief Enable group
+ *
+ * An enabled group is put on the idle scheduler list and can be used to handle jobs. Does nothing if
+ * the group is already enabled.
+ *
+ * @param group group to enable
+ */
+void mali_group_enable(struct mali_group *group);
+
+/** @brief Disable group
+ *
+ * A disabled group will no longer be used by the scheduler. If it is part of a virtual group, the
+ * group will be removed from it before being disabled. Cores that are part of a disabled group are
+ * safe to power down.
+ *
+ * @param group group to disable
+ */
+void mali_group_disable(struct mali_group *group);
+
+MALI_STATIC_INLINE mali_bool mali_group_virtual_disable_if_empty(struct mali_group *group)
+{
+ mali_bool empty = MALI_FALSE;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+ if (_mali_osk_list_empty(&group->group_list)) {
+ group->state = MALI_GROUP_STATE_DISABLED;
+ group->session = NULL;
+
+ empty = MALI_TRUE;
+ }
+
+ return empty;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_virtual_enable_if_empty(struct mali_group *group)
+{
+ mali_bool empty = MALI_FALSE;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+ if (_mali_osk_list_empty(&group->group_list)) {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ empty = MALI_TRUE;
+ }
+
+ return empty;
+}
+
+
+/* Get group used l2 domain and core domain ref */
+void mali_group_get_pm_domain_ref(struct mali_group *group);
+/* Put group used l2 domain and core domain ref */
+void mali_group_put_pm_domain_ref(struct mali_group *group);
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_hw_core.c b/drivers/gpu/mali/mali/common/mali_hw_core.c
new file mode 100644
index 00000000..e5004abd
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_hw_core.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+
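+/* A typical create/use/delete sequence (sketch only; the resource variable and
+ * the 0x1000 register size are illustrative, not taken from a real device
+ * configuration):
+ *
+ *	struct mali_hw_core core;
+ *	if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core, &resource, 0x1000)) {
+ *		u32 val = mali_hw_core_register_read(&core, 0);
+ *		...
+ *		mali_hw_core_delete(&core);
+ *	}
+ */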
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+ core->phys_addr = resource->base;
+ core->phys_offset = resource->base - _mali_osk_resource_base_address();
+ core->description = resource->description;
+ core->size = reg_size;
+
+ MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr);
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) {
+ core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+ if (NULL != core->mapped_registers) {
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+ } else {
+ MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+ _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+ core->mapped_registers = NULL;
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/gpu/mali/mali/common/mali_hw_core.h b/drivers/gpu/mali/mali/common/mali_hw_core.h
new file mode 100644
index 00000000..341d3037
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_hw_core.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core {
+ u32 phys_addr; /**< Physical address of the registers */
+ u32 phys_offset; /**< Offset from start of Mali to registers */
+ u32 size; /**< Size of registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ const char *description; /**< Name of unit (as specified in device configuration) */
+};
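+
+/* Illustrative sketch (not part of the driver): core-specific structs embed
+ * struct mali_hw_core, so the generic helpers below can operate on any core
+ * type. The names in this example are hypothetical.
+ *
+ *   struct mali_example_core {
+ *       struct mali_hw_core hw_core;   // common register-window state (embedded)
+ *       u32 example_state;             // core-specific fields follow
+ *   };
+ *
+ *   // After mali_hw_core_create() succeeds, registers can be accessed:
+ *   // u32 version = mali_hw_core_register_read(&example.hw_core, 0x0000);
+ */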
+
+#define MALI_REG_POLL_COUNT_FAST 1000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
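+/* mali_hw_core_create() requests and maps the register region described by the
+ * resource; on success the registers are accessible through the helpers below.
+ * mali_hw_core_delete() unmaps and releases the region again. */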
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+ u32 read_val;
+ read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+ MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, read_val));
+ return read_val;
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register.
+ * The register will only be written if the new value is different from the old value.
+ * Note that the old value is not updated here; the caller is responsible for
+ * keeping its cached copy in sync after the write. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write_relaxed_conditional for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+ if (old_val != new_val) {
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+ }
+}
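+
+/* Caller-side sketch (illustrative): since the helper does not update the old
+ * value itself, callers typically keep a shadow copy of the register and
+ * refresh it after the conditional write. "shadow" is a hypothetical variable.
+ *
+ *   mali_hw_core_register_write_relaxed_conditional(core, reg, new_val, shadow);
+ *   shadow = new_val;
+ */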
+
+
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description, relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i < nr_of_regs; i++) {
+ mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+ }
+}
+
+/* Conditionally write a set of registers.
+ * Each register will only be written if the new value is different from the old value.
+ * Note that the old values are not updated here; the caller is responsible for
+ * keeping its cached copies in sync after the writes. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array)
+{
+ u32 i;
+	MALI_DEBUG_PRINT(6, ("register_write_array_conditional: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+			     core->description, relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i < nr_of_regs; i++) {
+ if (old_array[i] != write_array[i]) {
+ mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+ }
+ }
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_common.h b/drivers/gpu/mali/mali/common/mali_kernel_common.h
new file mode 100644
index 00000000..a9566a38
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_common.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+#if defined(_DEBUG)
+#define DEBUG
+#endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...)           Do not use this function: it will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X))  Prints the second argument; intended to be filtered by
+ *                              nr <= mali_debug_level (the filtered variant is currently disabled below).
+ * - MALI_DEBUG_PRINT_ERROR((X)) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(exp)     If the asserted expression is false, a debugger breakpoint is triggered.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a NULL pointer.
+ * - MALI_DEBUG_CODE(X)         The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg.
+ *
+ * Suggested range for the debug level is [1:6] where
+ * [1:2] are messages with the highest priority, indicating possible errors.
+ * [3:4] are messages with medium priority, outputting important variables.
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
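+
+/* Usage sketch (illustrative only):
+ *
+ *   MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+ *   MALI_DEBUG_ASSERT_POINTER(core);
+ *   MALI_DEBUG_CODE(u32 debug_only_counter = 0);
+ */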
+
+/**
+* Fundamental error macro. Reports an error code. This is abstracted to allow us to
+* easily switch to a different error reporting method if we want, and also to allow
+* us to search for error returns easily.
+*
+* Note no closing semicolon - this is supplied in typical usage:
+*
+* MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+*/
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given evaluates to anything other
+ * than _MALI_OSK_ERR_OK, then the value is returned from the enclosing function
+ * as an error code. This effectively acts as a guard clause, and propagates
+ * error values up the call stack. This uses a temporary value to ensure that
+ * the error expression is not evaluated twice.
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
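+
+/* Combined usage sketch (illustrative; the example_* names are hypothetical):
+ *
+ *   _mali_osk_errcode_t example_init(struct example *obj)
+ *   {
+ *       MALI_CHECK_NON_NULL(obj, _MALI_OSK_ERR_INVALID_ARGS);
+ *       MALI_CHECK_NO_ERROR(example_sub_init(obj));   // propagate sub-errors
+ *       MALI_CHECK_GOTO(example_setup_ok(obj), cleanup); // jump on failure
+ *       MALI_SUCCESS;
+ *   cleanup:
+ *       example_teardown(obj);
+ *       MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ *   }
+ */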
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#if defined(CONFIG_MALI_QUIET)
+#define MALI_PRINTF(args)
+#else
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+#endif
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); \
+ } while (0)
+
+/* Level-filtered variant, currently disabled in this build:
+ *
+ * #define MALI_DEBUG_PRINT(level, args) do { \
+ *	if((level) <= mali_debug_level) \
+ *	{MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ *	} while (0)
+ */
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * Variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows static checkers such as sparse to generate a warning. Other
+ * toolchains may not support this, so we define it here as an empty macro if the
+ * compiler doesn't define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_core.c b/drivers/gpu/mali/mali/common/mali_kernel_core.c
new file mode 100644
index 00000000..66603357
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_core.c
@@ -0,0 +1,1477 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#include "mali_dma.h"
+#include "mali_timeline.h"
+#include "mali_soft_job.h"
+#include "mali_pm_domain.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+
+
+/* Mali GPU memory. Real values come from module parameters or from device specific data */
+unsigned int mali_dedicated_mem_start = 0;
+unsigned int mali_dedicated_mem_size = 0;
+unsigned int mali_shared_mem_size = 0;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/* Mali max job runtime */
+extern int mali_max_job_runtime;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static u32 global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+mali_bool mali_gpu_class_is_mali450 = MALI_FALSE;
+
+static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
+{
+ global_gpu_base_address = _mali_osk_resource_base_address();
+ if (0 == global_gpu_base_address) {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
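+/* Map a physical PP core's register offset (relative to the GPU base address)
+ * to the core's bit in the broadcast unit mask. Unknown offsets map to 0. */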
+static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
+{
+ switch (resource_pp->base - global_gpu_base_address) {
+ case 0x08000:
+ case 0x20000: /* fall-through for aliased mapping */
+ return 0x01;
+ case 0x0A000:
+ case 0x22000: /* fall-through for aliased mapping */
+ return 0x02;
+ case 0x0C000:
+ case 0x24000: /* fall-through for aliased mapping */
+ return 0x04;
+ case 0x0E000:
+ case 0x26000: /* fall-through for aliased mapping */
+ return 0x08;
+ case 0x28000:
+ return 0x10;
+ case 0x2A000:
+ return 0x20;
+ case 0x2C000:
+ return 0x40;
+ case 0x2E000:
+ return 0x80;
+ default:
+ return 0;
+ }
+}
+
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+ /*
+ * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
+ * Look at the version register for the first PP core in order to determine the GPU HW revision.
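+	 *
+	 * The version register layout, as decoded below: bits [31:16] hold the
+	 * product ID, bits [15:8] the major (r) version and bits [7:0] the
+	 * minor (p) version.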
+ */
+
+ u32 first_pp_offset;
+ _mali_osk_resource_t first_pp_resource;
+
+ /* Find out where the first PP core is located */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL)) {
+ /* Mali-300/400/450 */
+ first_pp_offset = 0x8000;
+ } else {
+ /* Mali-200 */
+ first_pp_offset = 0x0000;
+ }
+
+ /* Find the first PP core resource (again) */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource)) {
+ /* Create a dummy PP object for this core so that we can read the version register */
+ struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
+ if (NULL != pp_core) {
+ u32 pp_version = mali_pp_core_get_version(pp_core);
+ mali_group_delete(group);
+
+ global_gpu_major_version = (pp_version >> 8) & 0xFF;
+ global_gpu_minor_version = pp_version & 0xFF;
+
+ switch (pp_version >> 16) {
+ case MALI200_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI200;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n"));
+ _mali_osk_abort();
+ break;
+ case MALI300_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI300;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI400_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI400;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI450_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI450;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ default:
+ MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+static void mali_resource_count(u32 *pp_count, u32 *l2_count)
+{
+ *pp_count = 0;
+ *l2_count = 0;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
+ ++(*pp_count);
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
+ ++(*l2_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
+ ++(*l2_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
+ ++(*l2_count);
+ }
+}
+
+static void mali_delete_groups(void)
+{
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(0);
+ while (NULL != group) {
+ mali_group_delete(group);
+ group = mali_group_get_glob_group(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups());
+}
+
+static void mali_delete_l2_cache_cores(void)
+{
+ struct mali_l2_cache_core *l2;
+
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ while (NULL != l2) {
+ mali_l2_cache_delete(l2);
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
+}
+
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (NULL != resource) {
+
+ MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+ l2_cache = mali_l2_cache_create(resource);
+ if (NULL == l2_cache) {
+ MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+ return NULL;
+ }
+
+		MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n"));
+	}
+
+ return l2_cache;
+}
+
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (mali_is_mali400()) {
+ _mali_osk_resource_t l2_resource;
+ if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource)) {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ l2_cache = mali_create_l2_cache_core(&l2_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
+ } else if (mali_is_mali450()) {
+ /*
+ * L2 for GP at 0x10000
+ * L2 for PP0-3 at 0x01000
+ * L2 for PP4-7 at 0x11000 (optional)
+ */
+
+ _mali_osk_resource_t l2_gp_resource;
+ _mali_osk_resource_t l2_pp_grp0_resource;
+ _mali_osk_resource_t l2_pp_grp1_resource;
+
+		/* Create the L2 cache core for the GP and add it to its PM domain */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_gp_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+		/* Create the L2 cache core for PP group 0 and add it to its PM domain */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L21_DOMAIN_INDEX), l2_cache);
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Second PP core group is optional, don't fail if we don't find it */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L22_DOMAIN_INDEX), l2_cache);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
+ _mali_osk_resource_t *resource_mmu,
+ _mali_osk_resource_t *resource_gp,
+ _mali_osk_resource_t *resource_pp)
+{
+ struct mali_mmu_core *mmu;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+ /* Create the group object */
+ group = mali_group_create(cache, NULL, NULL);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+ return NULL;
+ }
+
+ /* Create the MMU object inside group */
+ mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+ if (NULL == mmu) {
+ MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+
+ if (NULL != resource_gp) {
+ /* Create the GP core object inside this group */
+ struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+ if (NULL == gp_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create GP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ if (NULL != resource_pp) {
+ struct mali_pp_core *pp_core;
+
+ /* Create the PP core object inside this group */
+ pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp));
+ if (NULL == pp_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ /* Reset group */
+ mali_group_lock(group);
+ mali_group_reset(group);
+ mali_group_unlock(group);
+
+ return group;
+}
+
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+ _mali_osk_resource_t *resource_pp_bcast,
+ _mali_osk_resource_t *resource_dlbu,
+ _mali_osk_resource_t *resource_bcast)
+{
+ struct mali_mmu_core *mmu_pp_bcast_core;
+ struct mali_pp_core *pp_bcast_core;
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+ /* Create the DLBU core object */
+ dlbu_core = mali_dlbu_create(resource_dlbu);
+ if (NULL == dlbu_core) {
+ MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the Broadcast unit core */
+ bcast_core = mali_bcast_unit_create(resource_bcast);
+ if (NULL == bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the group object */
+#if defined(DEBUG)
+ /* Get a physical PP group to temporarily add to broadcast unit. IRQ
+ * verification needs a physical group in the broadcast unit to test
+ * the broadcast unit interrupt line. */
+ {
+ struct mali_group *phys_group = NULL;
+ int i;
+ for (i = 0; i < mali_group_get_glob_num_groups(); i++) {
+ phys_group = mali_group_get_glob_group(i);
+ if (NULL != mali_group_get_pp_core(phys_group)) break;
+ }
+ MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group));
+
+ /* Add the group temporarily to the broadcast, and update the
+ * broadcast HW. Since the HW is not updated when removing the
+ * group the IRQ check will work when the virtual PP is created
+ * later.
+ *
+ * When the virtual group gets populated, the actually used
+ * groups will be added to the broadcast unit and the HW will
+ * be updated.
+ */
+ mali_bcast_add_group(bcast_core, phys_group);
+ mali_bcast_reset(bcast_core);
+ mali_bcast_remove_group(bcast_core, phys_group);
+ }
+#endif /* DEBUG */
+ group = mali_group_create(NULL, dlbu_core, bcast_core);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+ mali_bcast_unit_delete(bcast_core);
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the MMU object inside group */
+ mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+ if (NULL == mmu_pp_bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the PP core object inside this group */
+ pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0);
+ if (NULL == pp_bcast_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+ struct mali_group *group;
+ int cluster_id_gp = 0;
+ int cluster_id_pp_grp0 = 0;
+ int cluster_id_pp_grp1 = 0;
+ int i;
+
+ _mali_osk_resource_t resource_gp;
+ _mali_osk_resource_t resource_gp_mmu;
+ _mali_osk_resource_t resource_pp[8];
+ _mali_osk_resource_t resource_pp_mmu[8];
+ _mali_osk_resource_t resource_pp_mmu_bcast;
+ _mali_osk_resource_t resource_pp_bcast;
+ _mali_osk_resource_t resource_dlbu;
+ _mali_osk_resource_t resource_bcast;
+ _mali_osk_errcode_t resource_gp_found;
+ _mali_osk_errcode_t resource_gp_mmu_found;
+ _mali_osk_errcode_t resource_pp_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+ _mali_osk_errcode_t resource_pp_bcast_found;
+ _mali_osk_errcode_t resource_dlbu_found;
+ _mali_osk_errcode_t resource_bcast_found;
+
+ if (!(mali_is_mali400() || mali_is_mali450())) {
+ /* No known HW core */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) {
+		/* Max job runtime is not overridden by a module parameter, so use the device setting */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.max_job_runtime) {
+ mali_max_job_runtime = data.max_job_runtime;
+ }
+ }
+ }
+
+ if (mali_is_mali450()) {
+		/* Mali-450 has separate L2s for the GP and the PP core group(s) */
+ cluster_id_pp_grp0 = 1;
+ cluster_id_pp_grp1 = 2;
+ }
+
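+	/* Fixed register-map offsets relative to the GPU base address: GP at 0x00000,
+	 * GP MMU at 0x03000, PP0-3 at 0x08000/0x0A000/0x0C000/0x0E000 with their MMUs
+	 * at 0x04000-0x07000, and PP4-7 at 0x28000-0x2E000 with their MMUs at
+	 * 0x1C000-0x1F000. */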
+ resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
+ resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
+ resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
+ resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
+ resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
+ resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
+ resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
+ resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
+ resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
+ resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
+ resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
+ resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
+ resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
+ resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
+ resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
+ resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
+ resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
+ resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
+
+
+ if (mali_is_mali450()) {
+ resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
+ resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
+ resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
+ resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+
+ if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_dlbu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_pp_bcast_found) {
+ /* Missing mandatory core(s) for Mali-450 */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK != resource_gp_found ||
+ _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) {
+ /* Missing mandatory core(s) */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Add GP in group, for PMU ref count */
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX), group);
+
+ /* Create group for first (and mandatory) PP core */
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+	/* Add the group to its corresponding PP PM domain */
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_PP0_DOMAIN_INDEX), group);
+
+ mali_inited_pp_cores_group_1++;
+
+ /* Create groups for rest of the cores in the first PP core group */
+ for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
+ if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+
+ mali_inited_pp_cores_group_1++;
+ }
+ }
+ }
+
+ /* Create groups for cores in the second PP core group */
+ for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */
+ if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+				MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 has a second core group */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+ mali_inited_pp_cores_group_2++;
+ }
+ }
+ }
+
+ if (mali_is_mali450()) {
+ _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+
+ mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+ mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+ MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_check_shared_interrupts(void)
+{
+#if !defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_TRUE == _mali_osk_shared_interrupts()) {
+ MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ /* It is OK to compile support for shared interrupts even if Mali is not using it. */
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_create_pm_domains(void)
+{
+ int i;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0x0 == mali_pmu_get_domain_mask(i)) continue;
+
+ if (NULL == mali_pm_domain_create(mali_pmu_get_domain_mask(i))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_use_default_pm_domain_config(void)
+{
+ u32 pp_count_gr1 = 0;
+ u32 pp_count_gr2 = 0;
+ u32 l2_count = 0;
+
+ MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+ /* GP core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x00000, NULL)) {
+ mali_pmu_set_domain_mask(MALI_GP_DOMAIN_INDEX, 0x01);
+ }
+
+ /* PP0 - PP3 core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
+ ++pp_count_gr1;
+
+ if (mali_is_mali400()) {
+ mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 2);
+ } else if (mali_is_mali450()) {
+ mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 1);
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
+ ++pp_count_gr1;
+
+ if (mali_is_mali400()) {
+ mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 3);
+ } else if (mali_is_mali450()) {
+ mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 2);
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
+ ++pp_count_gr1;
+
+ if (mali_is_mali400()) {
+ mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 4);
+ } else if (mali_is_mali450()) {
+ mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 2);
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
+ ++pp_count_gr1;
+
+ if (mali_is_mali400()) {
+ mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 5);
+ } else if (mali_is_mali450()) {
+ mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 2);
+ }
+ }
+
+ /* PP4 - PP7 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
+ ++pp_count_gr2;
+
+ mali_pmu_set_domain_mask(MALI_PP4_DOMAIN_INDEX, 0x01 << 3);
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
+ ++pp_count_gr2;
+
+ mali_pmu_set_domain_mask(MALI_PP5_DOMAIN_INDEX, 0x01 << 3);
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
+ ++pp_count_gr2;
+
+ mali_pmu_set_domain_mask(MALI_PP6_DOMAIN_INDEX, 0x01 << 3);
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
+ ++pp_count_gr2;
+
+ mali_pmu_set_domain_mask(MALI_PP7_DOMAIN_INDEX, 0x01 << 3);
+ }
+
+ /* L2gp/L2PP0/L2PP4 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
+ ++l2_count;
+
+ if (mali_is_mali400()) {
+ mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 1);
+ } else if (mali_is_mali450()) {
+ mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 0);
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
+ ++l2_count;
+
+ mali_pmu_set_domain_mask(MALI_L21_DOMAIN_INDEX, 0x01 << 1);
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
+ ++l2_count;
+
+ mali_pmu_set_domain_mask(MALI_L22_DOMAIN_INDEX, 0x01 << 3);
+ }
+
+ MALI_DEBUG_PRINT(2, ("Using default PMU domain config: (%d) gr1_pp_cores, (%d) gr2_pp_cores, (%d) l2_count. \n", pp_count_gr1, pp_count_gr2, l2_count));
+}
+
+static void mali_set_pmu_global_domain_config(void)
+{
+ _mali_osk_device_data data = { 0, };
+ int i = 0;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+		/* Check whether a customized PMU domain configuration is provided */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 != data.pmu_domain_config[i]) break;
+ }
+
+ if (MALI_MAX_NUMBER_OF_DOMAINS == i) {
+ mali_use_default_pm_domain_config();
+ } else {
+			/* Copy the customized config to the global config */
+ mali_pmu_copy_domain_mask(data.pmu_domain_config, sizeof(data.pmu_domain_config));
+ }
+ }
+}
+
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+ _mali_osk_resource_t resource_pmu;
+
+ MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu)) {
+ struct mali_pmu_core *pmu;
+
+ mali_set_pmu_global_domain_config();
+
+ pmu = mali_pmu_create(&resource_pmu);
+ if (NULL == pmu) {
+ MALI_PRINT_ERROR(("Failed to create PMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ /* It's ok if the PMU doesn't exist */
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_dma(void)
+{
+ _mali_osk_resource_t resource_dma;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x12000, &resource_dma)) {
+ if (NULL == mali_dma_create(&resource_dma)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+ } else {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+}
+
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+ _mali_osk_errcode_t ret;
+
+ if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+ /* Memory settings are not overridden by module parameters, so use device settings */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ mali_dedicated_mem_start = data.dedicated_mem_start;
+ mali_dedicated_mem_size = data.dedicated_mem_size;
+ mali_shared_mem_size = data.shared_mem_size;
+ }
+
+ if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+ /* No GPU memory specified */
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+ mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+ mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+ }
+
+ if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+ /* Dedicated memory */
+ ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ if (0 < mali_shared_mem_size) {
+ /* Shared OS memory */
+ ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ if (0 == mali_fb_start && 0 == mali_fb_size) {
+ /* Frame buffer settings are not overridden by module parameters, so use device settings */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ mali_fb_start = data.fb_start;
+ mali_fb_size = data.fb_size;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+ mali_fb_size, mali_fb_start));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+ mali_fb_size, mali_fb_start));
+ }
+
+ if (0 != mali_fb_size) {
+ /* Register frame buffer */
+ ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
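+/* A single L2 cache means Mali-300/400; more than one L2 implies Mali-450,
+ * which has separate L2s for the GP and each PP core group. */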
+static void mali_detect_gpu_class(void)
+{
+ u32 number_of_pp_cores = 0;
+ u32 number_of_l2_caches = 0;
+
+ mali_resource_count(&number_of_pp_cores, &number_of_l2_caches);
+ if (number_of_l2_caches > 1) {
+ mali_gpu_class_is_mali450 = MALI_TRUE;
+ }
+}
+
+static _mali_osk_errcode_t mali_init_hw_reset(void)
+{
+#if defined(CONFIG_MALI450)
+ _mali_osk_resource_t resource_bcast;
+
+ /* Ensure broadcast unit is in a good state before we start creating
+ * groups and cores.
+ */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast)) {
+ struct mali_bcast_unit *bcast_core;
+
+ bcast_core = mali_bcast_unit_create(&resource_bcast);
+ if (NULL == bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_bcast_unit_delete(bcast_core);
+ }
+#endif /* CONFIG_MALI450 */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+ _mali_osk_errcode_t err;
+ struct mali_pmu_core *pmu;
+
+ mali_pp_job_initialize();
+
+ err = mali_session_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+
+#if defined(CONFIG_MALI400_PROFILING)
+ err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (_MALI_OSK_ERR_OK != err) {
+ /* No biggie if we weren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ err = mali_memory_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+
+ /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+ err = mali_parse_config_memory();
+ if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+
+ err = mali_set_global_gpu_base_address();
+ if (_MALI_OSK_ERR_OK != err) goto set_global_gpu_base_address_failed;
+
+	/* Detect the GPU class according to the number of L2 caches */
+ mali_detect_gpu_class();
+
+ err = mali_check_shared_interrupts();
+ if (_MALI_OSK_ERR_OK != err) goto check_shared_interrupts_failed;
+
+ err = mali_pp_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+
+ /* Initialize the power management module */
+ err = mali_pm_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+
+ /* Initialize the MALI PMU */
+ err = mali_parse_config_pmu();
+ if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
+
+ /* Make sure the power stays on for the rest of this function */
+ err = _mali_osk_pm_dev_ref_add();
+ if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
+
+ /*
+	 * If run-time PM is used, then the mali_pm module has already been
+	 * notified that the power is now on (through the resume callback functions).
+ * However, if run-time PM is not used, then there will probably not be any
+ * calls to the resume callback functions, so we need to explicitly tell it
+ * that the power is on.
+ */
+ mali_pm_set_power_is_on();
+
+ /* Reset PMU HW and ensure all Mali power domains are on */
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu) {
+ err = mali_pmu_reset(pmu);
+ if (_MALI_OSK_ERR_OK != err) goto pmu_reset_failed;
+ }
+
+ /* Ensure HW is in a good state before starting to access cores. */
+ err = mali_init_hw_reset();
+ if (_MALI_OSK_ERR_OK != err) goto init_hw_reset_failed;
+
+ /* Detect which Mali GPU we are dealing with */
+ err = mali_parse_product_info();
+ if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+
+ /* The global_product_id is now populated with the correct Mali GPU */
+
+ /* Create PM domains only if PMU exists */
+ if (NULL != pmu) {
+ err = mali_create_pm_domains();
+ if (_MALI_OSK_ERR_OK != err) goto pm_domain_failed;
+ }
+
+ /* Initialize MMU module */
+ err = mali_mmu_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+
+ if (mali_is_mali450()) {
+ err = mali_dlbu_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
+
+ err = mali_parse_config_dma();
+ if (_MALI_OSK_ERR_OK != err) goto dma_parsing_failed;
+ }
+
+ /* Start configuring the actual Mali hardware. */
+ err = mali_parse_config_l2_cache();
+ if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+ err = mali_parse_config_groups();
+ if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+
+ /* Initialize the schedulers */
+ err = mali_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
+ err = mali_gp_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+
+ /* PP scheduler population can't fail */
+ mali_pp_scheduler_populate();
+
+ /* Initialize the GPU utilization tracking */
+ err = mali_utilization_init();
+ if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
+
+ /* Allowing the system to be turned off */
+ _mali_osk_pm_dev_ref_dec();
+
+ MALI_SUCCESS; /* all ok */
+
+ /* Error handling */
+
+utilization_init_failed:
+ mali_pp_scheduler_depopulate();
+ mali_gp_scheduler_terminate();
+gp_scheduler_init_failed:
+ mali_scheduler_terminate();
+scheduler_init_failed:
+config_parsing_failed:
+ mali_delete_groups(); /* Delete any groups not (yet) owned by a scheduler */
+ mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
+ {
+ struct mali_dma_core *dma = mali_dma_get_global_dma_core();
+ if (NULL != dma) mali_dma_delete(dma);
+ }
+dma_parsing_failed:
+ mali_dlbu_terminate();
+dlbu_init_failed:
+ mali_mmu_terminate();
+mmu_init_failed:
+ mali_pm_domain_terminate();
+pm_domain_failed:
+ /* Nothing to roll back */
+product_info_parsing_failed:
+ /* Nothing to roll back */
+init_hw_reset_failed:
+ /* Nothing to roll back */
+pmu_reset_failed:
+ /* Allowing the system to be turned off */
+ _mali_osk_pm_dev_ref_dec();
+pm_always_on_failed:
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu) {
+ mali_pmu_delete(pmu);
+ }
+parse_pmu_config_failed:
+ mali_pm_terminate();
+pm_init_failed:
+ mali_pp_scheduler_terminate();
+pp_scheduler_init_failed:
+check_shared_interrupts_failed:
+ global_gpu_base_address = 0;
+set_global_gpu_base_address_failed:
+ /* undoing mali_parse_config_memory() is done by mali_memory_terminate() */
+parse_memory_config_failed:
+ mali_memory_terminate();
+memory_init_failed:
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_term();
+#endif
+ mali_session_terminate();
+session_init_failed:
+ mali_pp_job_terminate();
+ return err;
+}
+
+void mali_terminate_subsystems(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ struct mali_dma_core *dma = mali_dma_get_global_dma_core();
+
+ MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+ /* shut down subsystems in reverse order from startup */
+
+ /* We need the GPU to be powered up for the terminate sequence */
+ _mali_osk_pm_dev_ref_add();
+
+ mali_utilization_term();
+ mali_pp_scheduler_depopulate();
+ mali_gp_scheduler_terminate();
+ mali_scheduler_terminate();
+ mali_delete_l2_cache_cores();
+ if (mali_is_mali450()) {
+ mali_dlbu_terminate();
+ }
+ mali_mmu_terminate();
+ if (NULL != pmu) {
+ mali_pmu_delete(pmu);
+ }
+ if (NULL != dma) {
+ mali_dma_delete(dma);
+ }
+ mali_pm_terminate();
+ mali_memory_terminate();
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_term();
+#endif
+
+ /* Allowing the system to be turned off */
+ _mali_osk_pm_dev_ref_dec();
+
+ mali_pp_scheduler_terminate();
+ mali_session_terminate();
+
+ mali_pp_job_terminate();
+}
+
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+ return global_product_id;
+}
+
+u32 mali_kernel_core_get_gpu_major_version(void)
+{
+ return global_gpu_major_version;
+}
+
+u32 mali_kernel_core_get_gpu_minor_version(void)
+{
+ return global_gpu_minor_version;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+	/* check compatibility */
+ if (args->version == _MALI_UK_API_VERSION) {
+ args->compatible = 1;
+ } else {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+	/* check compatibility */
+ if (args->version == _MALI_UK_API_VERSION) {
+ args->compatible = 1;
+ } else {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue) {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ MALI_SUCCESS;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete(notification);
+
+ MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
+{
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue) {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ MALI_SUCCESS;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if (NULL == notification) {
+ MALI_PRINT_ERROR(("Failed to create notification object\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (!session->use_high_priority_job_queue) {
+ session->use_high_priority_job_queue = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ u32 i;
+ struct mali_session_data *session;
+
+	/* allocate a struct to track this session */
+ session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+ MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+ /* create a response queue for this session */
+ session->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session->ioctl_queue) {
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ session->page_directory = mali_mmu_pagedir_alloc();
+ if (NULL == session->page_directory) {
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) {
+ MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ if (0 != mali_dlbu_phys_addr) {
+ mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) {
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Create soft system. */
+ session->soft_job_system = mali_soft_job_system_create(session);
+ if (NULL == session->soft_job_system) {
+ mali_memory_session_end(session);
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Create timeline system. */
+ session->timeline_system = mali_timeline_system_create(session);
+ if (NULL == session->timeline_system) {
+ mali_soft_job_system_destroy(session->soft_job_system);
+ mali_memory_session_end(session);
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&session->number_of_window_jobs, 0)) {
+ MALI_DEBUG_PRINT_ERROR(("Initialization of atomic number_of_window_jobs failed.\n"));
+ mali_timeline_system_destroy(session->timeline_system);
+ mali_soft_job_system_destroy(session->soft_job_system);
+ mali_memory_session_end(session);
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+
+ session->use_high_priority_job_queue = MALI_FALSE;
+
+ /* Initialize list of PP jobs on this session. */
+ _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list);
+
+ /* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */
+ for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) {
+ _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
+ }
+
+ *context = (void *)session;
+
+ /* Add session to the list of all sessions. */
+ mali_session_add(session);
+
+ MALI_DEBUG_PRINT(2, ("Session started\n"));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ struct mali_session_data *session;
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+ MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+ MALI_DEBUG_ASSERT_POINTER(session->timeline_system);
+
+ /* Remove session from list of all sessions. */
+ mali_session_remove(session);
+
+ /* This flag is used to prevent queueing of jobs due to activation. */
+ session->is_aborting = MALI_TRUE;
+
+ /* Stop the soft job timer. */
+ mali_timeline_system_stop_timer(session->timeline_system);
+
+ /* Abort queued and running GP and PP jobs. */
+ mali_gp_scheduler_abort_session(session);
+ mali_pp_scheduler_abort_session(session);
+
+ /* Abort the soft job system. */
+ mali_soft_job_system_abort(session->soft_job_system);
+
+ /* Force execution of all pending bottom half processing for GP and PP. */
+ _mali_osk_wq_flush();
+
+ /* The session PP list should now be empty. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list));
+
+	/* At this point the GP and PP schedulers no longer have any jobs queued or running from this
+	 * session, and all soft jobs in the soft job system have been destroyed. */
+
+ /* Any trackers left in the timeline system are directly or indirectly waiting on external
+ * sync fences. Cancel all sync fence waiters to trigger activation of all remaining
+ * trackers. This call will sleep until all timelines are empty. */
+ mali_timeline_system_abort(session->timeline_system);
+
+ /* Flush pending work.
+ * Needed to make sure all bottom half processing related to this
+ * session has been completed, before we free internal data structures.
+ */
+ _mali_osk_wq_flush();
+
+ /* Destroy timeline system. */
+ mali_timeline_system_destroy(session->timeline_system);
+ session->timeline_system = NULL;
+
+ /* Destroy soft system. */
+ mali_soft_job_system_destroy(session->soft_job_system);
+ session->soft_job_system = NULL;
+
+ MALI_DEBUG_CODE({
+ /* Check that the pp_job_fb_lookup_list array is empty. */
+ u32 i;
+ for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i)
+ {
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_fb_lookup_list[i]));
+ }
+ });
+
+ /* Free remaining memory allocated to this session */
+ mali_memory_session_end(session);
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ _mali_osk_atomic_term(&session->number_of_window_jobs);
+#endif
+
+ /* Free session data structures */
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+ MALI_SUCCESS;
+}
+
+#if MALI_STATE_TRACKING
+u32 _mali_kernel_core_dump_state(char *buf, u32 size)
+{
+ int n = 0; /* Number of bytes written to buf */
+
+ n += mali_gp_scheduler_dump_state(buf + n, size - n);
+ n += mali_pp_scheduler_dump_state(buf + n, size - n);
+
+ return n;
+}
+#endif
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_core.h b/drivers/gpu/mali/mali/common/mali_kernel_core.h
new file mode 100644
index 00000000..88dc80ed
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_core.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+typedef enum {
+ _MALI_PRODUCT_ID_UNKNOWN,
+ _MALI_PRODUCT_ID_MALI200,
+ _MALI_PRODUCT_ID_MALI300,
+ _MALI_PRODUCT_ID_MALI400,
+ _MALI_PRODUCT_ID_MALI450,
+} _mali_product_id_t;
+
+extern mali_bool mali_gpu_class_is_mali450;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 mali_kernel_core_get_gpu_major_version(void);
+
+u32 mali_kernel_core_get_gpu_minor_version(void);
+
+u32 _mali_kernel_core_dump_state(char *buf, u32 size);
+
+MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
+{
+#if defined(CONFIG_MALI450)
+ return mali_gpu_class_is_mali450;
+#else
+ return MALI_FALSE;
+#endif
+}
+
+MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
+{
+#if !defined(CONFIG_MALI450)
+ return MALI_TRUE;
+#else
+ return !mali_gpu_class_is_mali450;
+#endif
+}
+
+#endif /* __MALI_KERNEL_CORE_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.c b/drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 00000000..893994e7
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
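+/*
+ * MALI_PAD_INT rounds its argument up to the next multiple of BITS_PER_LONG,
+ * so the usage bitmap always holds a whole number of words. Illustrative
+ * example (assuming BITS_PER_LONG == 64): MALI_PAD_INT(10) == 64,
+ * MALI_PAD_INT(64) == 64 and MALI_PAD_INT(65) == 128.
+ */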
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table *descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table *table);
+
+mali_descriptor_mapping *mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ mali_descriptor_mapping *map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map) {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table) {
+ map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+ if (NULL != map->lock) {
+				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping *map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_mutex_rw_term(map->lock);
+ _mali_osk_free(map);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping *map, void *target, int *odescriptor)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ int new_descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+ _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+	if (new_descriptor == map->current_nr_mappings) {
+		/* no free descriptor, try to expand the table */
+		mali_descriptor_table *new_table, *old_table;
+		int old_nr_mappings = map->current_nr_mappings;
+		if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+		map->current_nr_mappings += BITS_PER_LONG;
+		new_table = descriptor_table_alloc(map->current_nr_mappings);
+		if (NULL == new_table) goto unlock_and_exit;
+
+		old_table = map->table;
+		/* Copy using the old table's size; the old usage bitmap and mapping
+		 * array are smaller than the newly allocated ones. */
+		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long) * old_nr_mappings) / BITS_PER_LONG);
+		_mali_osk_memcpy(new_table->mappings, old_table->mappings, old_nr_mappings * sizeof(void *));
+		map->table = new_table;
+		descriptor_table_free(old_table);
+	}
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+ map->table->mappings[new_descriptor] = target;
+ *odescriptor = new_descriptor;
+ err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+ _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(err);
+}
+
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping *map, void (*callback)(int, void *))
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* id 0 is skipped as it is a reserved ID not mapping to anything */
+ for (i = 1; i < map->current_nr_mappings; ++i) {
+ if (_mali_osk_test_bit(i, map->table->usage)) {
+ callback(i, map->table->mappings[i]);
+ }
+ }
+ _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping *map, int descriptor, void **target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(map);
+ _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ((descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+ *target = map->table->mappings[descriptor];
+ result = _MALI_OSK_ERR_OK;
+ } else *target = NULL;
+ _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping *map, int descriptor, void *target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ((descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+ map->table->mappings[descriptor] = target;
+ result = _MALI_OSK_ERR_OK;
+ }
+ _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+void *mali_descriptor_mapping_free(mali_descriptor_mapping *map, int descriptor)
+{
+ void *old_value = NULL;
+
+ _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ((descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+ old_value = map->table->mappings[descriptor];
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return old_value;
+}
+
+static mali_descriptor_table *descriptor_table_alloc(int count)
+{
+ mali_descriptor_table *table;
+
+ table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG) + (sizeof(void *) * count));
+
+ if (NULL != table) {
+ table->usage = (u32 *)((u8 *)table + sizeof(mali_descriptor_table));
+ table->mappings = (void **)((u8 *)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(mali_descriptor_table *table)
+{
+ _mali_osk_free(table);
+}
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.h b/drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 00000000..de2b8f55
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table {
+ u32 *usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void **mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping {
+ _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table *table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Maximum number of entries to support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping *mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping *map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @param descriptor Pointer which will receive the allocated descriptor ID
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping *map, void *target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping *map, int descriptor, void **target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping *map, int descriptor, void *target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping *map, void (*callback)(int, void *));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ *
+ * @return old value of descriptor mapping
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping *map, int descriptor);
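+
+/*
+ * Illustrative usage sketch (not part of the driver; error handling is
+ * abbreviated and 'session' stands in for any client object):
+ *
+ *   mali_descriptor_mapping *map = mali_descriptor_mapping_create(64, 4096);
+ *   int id;
+ *   void *target;
+ *
+ *   if (_MALI_OSK_ERR_OK == mali_descriptor_mapping_allocate_mapping(map, session, &id)) {
+ *           mali_descriptor_mapping_get(map, id, &target);   (target is now session)
+ *           mali_descriptor_mapping_free(map, id);           (id may now be reused)
+ *   }
+ *   mali_descriptor_mapping_destroy(map);
+ */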
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_utilization.c b/drivers/gpu/mali/mali/common/mali_kernel_utilization.c
new file mode 100644
index 00000000..19a9aae6
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_utilization.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_scheduler.h"
+
+/* Thresholds for GP bound detection. */
+#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
+#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
+
+/* Lock protecting the GPU utilization bookkeeping below */
+static _mali_osk_spinlock_irq_t *time_data_lock;
+
+static u32 num_running_gp_cores;
+static u32 num_running_pp_cores;
+
+static u64 work_start_time_gpu = 0;
+static u64 work_start_time_gp = 0;
+static u64 work_start_time_pp = 0;
+static u64 accumulated_work_time_gpu = 0;
+static u64 accumulated_work_time_gp = 0;
+static u64 accumulated_work_time_pp = 0;
+
+static u64 period_start_time = 0;
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 last_utilization_gpu = 0;
+static u32 last_utilization_gp = 0;
+static u32 last_utilization_pp = 0;
+
+/* How often to calculate and report GPU utilization, in milliseconds */
+static u32 mali_utilization_timeout = 1000;
+void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+extern void mali_power_performance_policy_callback(struct mali_gpu_utilization_data *data);
+#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
+
+static u32 calculate_window_render_fps(u64 time_period)
+{
+ u32 max_window_number;
+ u64 tmp;
+ u64 max = time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 time_period_shift;
+ u32 max_window_number_shift;
+ u32 ret_val;
+
+ max_window_number = mali_session_max_window_num();
+ /* To avoid float division, extend the dividend to ns unit */
+ tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+ if (tmp > time_period) {
+ max = tmp;
+ }
+
+ /*
+	 * We may have 64-bit values here: the dividend, the divisor, or both.
+	 * To avoid depending on a 64-bit divider, we shift both values down
+	 * equally first.
+ */
+ leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+ shift_val = 32 - leading_zeroes;
+
+ time_period_shift = (u32)(time_period >> shift_val);
+ max_window_number_shift = (u32)(tmp >> shift_val);
+
+ ret_val = max_window_number_shift / time_period_shift;
+
+ return ret_val;
+}
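+
+/*
+ * Illustrative example for calculate_window_render_fps() (hypothetical
+ * numbers): 60 window jobs over a 1 second period gives tmp == 60 * 10^9,
+ * which exceeds time_period == 10^9 ns, so both values are shifted down
+ * equally and the division yields roughly 60 fps.
+ */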
+#endif /* defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) */
+
+static void calculate_gpu_utilization(void *arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized_gpu;
+ u32 work_normalized_gp;
+ u32 work_normalized_pp;
+ u32 period_normalized;
+ u32 utilization_gpu;
+ u32 utilization_gp;
+ u32 utilization_pp;
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ u32 window_render_fps;
+#endif
+
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
+ /*
+ * No work done for this period
+ * - No need to reschedule timer
+ * - Report zero usage
+ */
+ timer_running = MALI_FALSE;
+
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ if (NULL != mali_utilization_callback) {
+ struct mali_gpu_utilization_data data = { 0, };
+ mali_utilization_callback(&data);
+ }
+
+ mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time_gpu != 0) {
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = time_now;
+
+ /* GP and/or PP will also be busy if the GPU is busy at this point */
+
+ if (work_start_time_gp != 0) {
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = time_now;
+ }
+
+ if (work_start_time_pp != 0) {
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = time_now;
+ }
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+	 * To avoid depending on a 64-bit divider, we shift both values down
+	 * equally first. We then shift the dividend up, or the divisor down,
+	 * so that the result is expressed in parts of 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+ work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+ work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+ /*
+ * Now, we should report the usage in parts of 256
+ * this means we must shift up the dividend or down the divisor by 8
+ * (we could do a combination, but we just use one for simplicity,
+ * but the end result should be good enough anyway)
+ */
+ if (period_normalized > 0x00FFFFFF) {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ } else {
+ /*
+		 * The divisor is so small that we can shift up the dividend without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized_gpu <<= 8;
+ work_normalized_gp <<= 8;
+ work_normalized_pp <<= 8;
+ }
+
+ utilization_gpu = work_normalized_gpu / period_normalized;
+ utilization_gp = work_normalized_gp / period_normalized;
+ utilization_pp = work_normalized_pp / period_normalized;
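+
+	/*
+	 * Illustrative example (hypothetical numbers): with a 1000 ms period and
+	 * the GPU busy for 250 ms of it, work/period == 1/4, so the reported
+	 * utilization is roughly 64 on the 0..256 scale (256 == fully busy).
+	 */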
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ window_render_fps = calculate_window_render_fps(time_period);
+#endif
+
+ last_utilization_gpu = utilization_gpu;
+ last_utilization_gp = utilization_gp;
+ last_utilization_pp = utilization_pp;
+
+ if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
+ (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
+ mali_scheduler_hint_enable(MALI_SCHEDULER_HINT_GP_BOUND);
+ } else {
+ mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+ }
+
+ /* starting a new period */
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+ period_start_time = time_now;
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+
+ if (NULL != mali_utilization_callback) {
+ struct mali_gpu_utilization_data data = {
+ utilization_gpu, utilization_gp, utilization_pp,
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ window_render_fps, window_render_fps
+#endif
+ };
+ mali_utilization_callback(&data);
+ }
+}
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+ _mali_osk_device_data data;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.utilization_interval) {
+ mali_utilization_timeout = data.utilization_interval;
+ }
+ if (NULL != data.utilization_callback) {
+ mali_utilization_callback = data.utilization_callback;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Platform has it's own policy \n"));
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
+ }
+ }
+#endif
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ if (mali_utilization_callback == NULL) {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: MALI Power Performance Policy Algorithm \n"));
+ mali_utilization_callback = mali_power_performance_policy_callback;
+ }
+#endif
+
+ if (NULL == mali_utilization_callback) {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
+ }
+
+ time_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+
+ if (NULL == time_data_lock) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ num_running_gp_cores = 0;
+ num_running_pp_cores = 0;
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer) {
+ _mali_osk_spinlock_irq_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_suspend(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ if (timer_running == MALI_TRUE) {
+ timer_running = MALI_FALSE;
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ _mali_osk_timer_del(utilization_timer);
+ return;
+ }
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer) {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_spinlock_irq_term(time_data_lock);
+}
+
+void mali_utilization_gp_start(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ ++num_running_gp_cores;
+ if (1 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First GP core started, consider GP busy from now and onwards */
+ work_start_time_gp = time_now;
+
+ if (0 == num_running_pp_cores) {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+ }
+
+ /* Start a new period (and timer) if needed */
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+ period_start_time = time_now;
+
+ /* Clear session->number_of_window_jobs */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ mali_session_max_window_num();
+#endif
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ } else {
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+ } else {
+ /* Nothing to do */
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+}
+
+void mali_utilization_pp_start(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ ++num_running_pp_cores;
+ if (1 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First PP core started, consider PP busy from now and onwards */
+ work_start_time_pp = time_now;
+
+ if (0 == num_running_gp_cores) {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+ }
+
+ /* Start a new period (and timer) if needed */
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+ period_start_time = time_now;
+
+ /* Clear session->number_of_window_jobs */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ mali_session_max_window_num();
+#endif
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ } else {
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+ } else {
+ /* Nothing to do */
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+}
+
+void mali_utilization_gp_end(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ --num_running_gp_cores;
+ if (0 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last GP core ended, consider GP idle from now and onwards */
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = 0;
+
+ if (0 == num_running_pp_cores) {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+void mali_utilization_pp_end(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ --num_running_pp_cores;
+ if (0 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last PP core ended, consider PP idle from now and onwards */
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = 0;
+
+ if (0 == num_running_gp_cores) {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+ return last_utilization_gpu;
+}
+
+u32 _mali_ukk_utilization_gp(void)
+{
+ return last_utilization_gp;
+}
+
+u32 _mali_ukk_utilization_pp(void)
+{
+ return last_utilization_pp;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_utilization.h b/drivers/gpu/mali/mali/common/mali_kernel_utilization.h
new file mode 100644
index 00000000..7d588ebf
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_utilization.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_osk.h"
+
+extern void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data);
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
+{
+ return (NULL != mali_utilization_callback);
+}
+
+/**
+ * Should be called when a GP job is about to start executing
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a GP job has finished executing
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a PP job is about to start executing
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a PP job has finished executing
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
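+
+/*
+ * Illustrative platform glue (a sketch assuming the mali_gpu_device_data
+ * fields declared in linux/mali/mali_utgard.h; the handler name and the
+ * interval value are hypothetical):
+ *
+ *   static void my_utilization_handler(struct mali_gpu_utilization_data *data)
+ *   {
+ *           use data->utilization_gpu, reported on a 0..256 scale
+ *   }
+ *
+ *   static struct mali_gpu_device_data mali_gpu_data = {
+ *           .utilization_interval = 1000,   (in ms)
+ *           .utilization_callback = my_utilization_handler,
+ *   };
+ */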
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_kernel_vsync.c b/drivers/gpu/mali/mali/common/mali_kernel_vsync.c
new file mode 100644
index 00000000..d067937c
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_kernel_vsync.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /*
+ * Manually generate user space events in kernel space.
+ * This saves user space from calling kernel space twice in this case.
+ * We just need to remember to add pid and tid manually.
+ */
+ if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+ if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+#endif
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/mali/mali/common/mali_l2_cache.c b/drivers/gpu/mali/mali/common/mali_l2_cache.c
new file mode 100644
index 00000000..3893f27e
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_l2_cache.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+#include "mali_pm_domain.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_SIZE = 0x0004,
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
+	/* unused = 0x000C */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
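+
+/*
+ * The SIZE register packs log2-encoded cache geometry, as decoded by
+ * mali_l2_cache_create() below (see the HW TRM for the authoritative layout):
+ * bits [7:0] log2(line size in bytes), [15:8] log2(associativity),
+ * [23:16] log2(cache size in bytes) and [31:24] log2(external bus width in bits).
+ */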
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command {
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the flags used to enable optional features in the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_enable {
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status {
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2_cache_cores = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+
+/* Local helper functions */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+
+static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_lock(cache->counter_lock);
+#else
+ _mali_osk_spinlock_lock(cache->counter_lock);
+#endif
+}
+
+static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_unlock(cache->counter_lock);
+#else
+ _mali_osk_spinlock_unlock(cache->counter_lock);
+#endif
+}
+
+static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_lock(cache->command_lock);
+#else
+ _mali_osk_spinlock_lock(cache->command_lock);
+#endif
+}
+
+static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_unlock(cache->command_lock);
+#else
+ _mali_osk_spinlock_unlock(cache->command_lock);
+#endif
+}
+
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+{
+ struct mali_l2_cache_core *cache = NULL;
+
+ MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+
+ if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+ return NULL;
+ }
+
+ cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+ if (NULL != cache) {
+ cache->core_id = mali_global_num_l2_cache_cores;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ cache->pm_domain = NULL;
+ cache->mali_l2_status = MALI_L2_NORMAL;
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+ MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+ resource->description,
+ 1 << (((cache_size >> 16) & 0xff) - 10),
+ 1 << ((cache_size >> 8) & 0xff),
+ 1 << (cache_size & 0xff),
+ 1 << ((cache_size >> 24) & 0xff)));
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#else
+ cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#endif
+ if (NULL != cache->command_lock) {
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#else
+ cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#endif
+ if (NULL != cache->counter_lock) {
+ mali_l2_cache_reset(cache);
+
+ cache->last_invalidated_id = 0;
+
+ mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
+ mali_global_num_l2_cache_cores++;
+
+ return cache;
+ } else {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_term(cache->command_lock);
+#else
+ _mali_osk_spinlock_term(cache->command_lock);
+#endif
+ } else {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+
+ mali_hw_core_delete(&cache->hw_core);
+ }
+
+ _mali_osk_free(cache);
+ } else {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+ u32 i;
+
+ /* reset to defaults */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_term(cache->counter_lock);
+ _mali_osk_spinlock_irq_term(cache->command_lock);
+#else
+ _mali_osk_spinlock_term(cache->command_lock);
+ _mali_osk_spinlock_term(cache->counter_lock);
+#endif
+
+ mali_hw_core_delete(&cache->hw_core);
+
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
+ if (mali_global_l2_cache_cores[i] == cache) {
+ mali_global_l2_cache_cores[i] = NULL;
+ mali_global_num_l2_cache_cores--;
+
+ if (i != mali_global_num_l2_cache_cores) {
+ /* We removed a l2 cache from the middle of the array -- move the last
+ * l2 cache to the current position to close the gap */
+ mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
+ mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ _mali_osk_free(cache);
+}
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+ return cache->core_id;
+}
+
+static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+{
+ u32 value = 0; /* disabled src */
+ u32 reg_offset = 0;
+ mali_bool core_is_on;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ core_is_on = mali_l2_cache_lock_power_state(cache);
+
+ mali_l2_cache_counter_lock(cache);
+
+ switch (source_id) {
+ case 0:
+ cache->counter_src0 = counter;
+ reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+ break;
+
+ case 1:
+ cache->counter_src1 = counter;
+ reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+ break;
+
+ default:
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_counter_unlock(cache);
+ mali_l2_cache_unlock_power_state(cache);
+ return;
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter) {
+ value = counter;
+ }
+
+ if (MALI_TRUE == core_is_on) {
+ mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
+ }
+
+ mali_l2_cache_counter_unlock(cache);
+ mali_l2_cache_unlock_power_state(cache);
+}
+
+void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+{
+ mali_l2_cache_core_set_counter_internal(cache, 0, counter);
+}
+
+void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
+{
+ mali_l2_cache_core_set_counter_internal(cache, 1, counter);
+}
+
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src0;
+}
+
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+ MALI_DEBUG_ASSERT(NULL != src0);
+ MALI_DEBUG_ASSERT(NULL != value0);
+ MALI_DEBUG_ASSERT(NULL != src1);
+ MALI_DEBUG_ASSERT(NULL != value1);
+
+ /* Caller must hold the PM lock and know that we are powered on */
+
+ mali_l2_cache_counter_lock(cache);
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_counter_unlock(cache);
+
+ return;
+ }
+
+ *src0 = cache->counter_src0;
+ *src1 = cache->counter_src1;
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ }
+
+ mali_l2_cache_counter_unlock(cache);
+}
+
+static void mali_l2_cache_reset_counters_all(void)
+{
+ int i;
+ u32 value;
+ struct mali_l2_cache_core *cache;
+ u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+ for (i = 0; i < num_cores; i++) {
+ cache = mali_l2_cache_core_get_glob_l2_core(i);
+ if (!cache)
+ continue;
+
+ if (mali_l2_cache_lock_power_state(cache)) {
+ mali_l2_cache_counter_lock(cache);
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_counter_unlock(cache);
+ mali_l2_cache_unlock_power_state(cache);
+ return;
+ }
+
+ /* Reset performance counters */
+ if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
+ value = 0;
+ } else {
+ value = cache->counter_src0;
+ }
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
+
+ if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
+ value = 0;
+ } else {
+ value = cache->counter_src1;
+ }
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
+
+ mali_l2_cache_counter_unlock(cache);
+ }
+
+ mali_l2_cache_unlock_power_state(cache);
+ }
+}
+
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+ if (mali_global_num_l2_cache_cores > index) {
+ return mali_global_l2_cache_cores[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+ return mali_global_num_l2_cache_cores;
+}
+
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ mali_l2_cache_counter_lock(cache);
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_counter_unlock(cache);
+
+ return;
+ }
+
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+
+ /* Restart any performance counters (if enabled) */
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
+ }
+
+ mali_l2_cache_counter_unlock(cache);
+}
+
+void mali_l2_cache_reset_all(void)
+{
+ int i;
+ u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+ for (i = 0; i < num_cores; i++) {
+ mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
+ }
+}
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL != cache) {
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+}
+
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL != cache) {
+ /* If the last cache invalidation was done by a job with a higher id we
+		 * don't have to flush. Since user space will store jobs with their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started. */
+ if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
+ return MALI_FALSE;
+ } else {
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ }
+
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+ return MALI_TRUE;
+}
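+
+/*
+ * Illustrative example (hypothetical ids): if the cache was last invalidated
+ * for job id 7, a conditional invalidate with id 5 returns MALI_FALSE (no
+ * flush needed), while id 9 issues a new CLEAR_ALL and returns MALI_TRUE.
+ */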
+
+void mali_l2_cache_invalidate_all(void)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
+		/* additional check */
+ if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
+ _mali_osk_errcode_t ret;
+ mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+ }
+ }
+ mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+ }
+}
+
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
+		/* additional check */
+ if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
+ u32 j;
+ for (j = 0; j < num_pages; j++) {
+ _mali_osk_errcode_t ret;
+ ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
+ }
+ }
+ }
+ mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+ }
+}
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
+{
+ return mali_pm_domain_lock_state(cache->pm_domain);
+}
+
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+{
+ return mali_pm_domain_unlock_state(cache->pm_domain);
+}
+
+/* -------- local helper functions below -------- */
+
+
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ /*
+ * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+ * The L2 cache will ignore commands if it is busy.
+ */
+ mali_l2_cache_command_lock(cache);
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_command_unlock(cache);
+ MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for L2 come back\n"));
+
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* First, wait for L2 cache command handler to go idle */
+
+ for (i = 0; i < loop_count; i++) {
+ if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+ break;
+ }
+ }
+
+ if (i == loop_count) {
+ mali_l2_cache_command_unlock(cache);
+ MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* then issue the command */
+ mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+ mali_l2_cache_command_unlock(cache);
+
+ MALI_SUCCESS;
+}
+
+void mali_l2_cache_pause_all(mali_bool pause)
+{
+ int i;
+ struct mali_l2_cache_core *cache;
+ u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ mali_l2_power_status status = MALI_L2_NORMAL;
+
+ if (pause) {
+ status = MALI_L2_PAUSE;
+ }
+
+ for (i = 0; i < num_cores; i++) {
+ cache = mali_l2_cache_core_get_glob_l2_core(i);
+ if (NULL != cache) {
+ cache->mali_l2_status = status;
+
+ /* Take and release the counter and command locks to
+ * ensure there are no active threads that didn't get
+ * the status flag update.
+ *
+ * The locks will also ensure the necessary memory
+ * barriers are done on SMP systems.
+ */
+ mali_l2_cache_counter_lock(cache);
+ mali_l2_cache_counter_unlock(cache);
+
+ mali_l2_cache_command_lock(cache);
+ mali_l2_cache_command_unlock(cache);
+ }
+ }
+
+	/* Resume from pause: invalidate the caches here so that no cache
+	 * operation is lost over the pause period, keeping the SW status
+	 * consistent with the L2 cache status.
+	 */
+ if (!pause) {
+ mali_l2_cache_invalidate_all();
+ mali_l2_cache_reset_counters_all();
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_l2_cache.h b/drivers/gpu/mali/mali/common/mali_l2_cache.h
new file mode 100644
index 00000000..4882e314
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_l2_cache.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+struct mali_group;
+struct mali_pm_domain;
+
+/* Flags describing state of the L2 */
+typedef enum mali_l2_power_status {
+ MALI_L2_NORMAL, /**< L2 is in normal state and operational */
+ MALI_L2_PAUSE, /**< L2 may not be accessed and may be powered off */
+} mali_l2_power_status;
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 core_id; /**< Unique core ID */
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_t *command_lock; /**< Serialize all L2 cache commands */
+ _mali_osk_spinlock_irq_t *counter_lock; /**< Synchronize L2 cache counter access */
+#else
+ _mali_osk_spinlock_t *command_lock;
+ _mali_osk_spinlock_t *counter_lock;
+#endif
+ u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 last_invalidated_id;
+ struct mali_pm_domain *pm_domain;
+ mali_l2_power_status mali_l2_status; /**< Indicate whether the L2 is paused or not */
+};
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+/**
+ * L2 pause is a status in which the L2 temporarily may not be accessed.
+ */
+void mali_l2_cache_pause_all(mali_bool pause);
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+MALI_STATIC_INLINE void mali_l2_cache_set_pm_domain(struct mali_l2_cache_core *cache, struct mali_pm_domain *domain)
+{
+ cache->pm_domain = domain;
+}
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
+void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+void mali_l2_cache_reset_all(void);
+
+struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_all(void);
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_mem_validation.c b/drivers/gpu/mali/mali/common/mali_mem_validation.c
new file mode 100644
index 00000000..b2c3c175
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_mem_validation.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct {
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Check restrictions on page alignment */
+ if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+ (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_mem_validator.phys_base = start;
+ mali_mem_validator.size = size;
+ MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+ mali_mem_validator.phys_base, mali_mem_validator.size));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+ if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
+ if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+ (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ if ((phys_addr >= mali_mem_validator.phys_base) &&
+ ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+ (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+ ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
+ MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ }
+
+ MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+ return _MALI_OSK_ERR_FAULT;
+}
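+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a platform setup
+ * path registers the dedicated frame buffer range once, and later requests
+ * are validated against it. The addresses below are made-up example values;
+ * both calls require page-aligned inputs.
+ *
+ * Register the window once (fails if a range is already registered or the
+ * arguments are not page aligned):
+ *
+ * mali_mem_validation_add_range(0x80000000, 0x00800000);
+ *
+ * Validate a user-supplied physical range before mapping it:
+ *
+ * if (_MALI_OSK_ERR_OK == mali_mem_validation_check(0x80100000, 0x00010000)) {
+ * the range lies fully inside the registered window; safe to map
+ * }
+ */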
diff --git a/drivers/gpu/mali/mali/common/mali_mem_validation.h b/drivers/gpu/mali/mali/common/mali_mem_validation.h
new file mode 100644
index 00000000..f0a76d1c
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_mem_validation.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_mmu.c b/drivers/gpu/mali/mali/common/mali_mmu.c
new file mode 100644
index 00000000..585e803f
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_mmu.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command {
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static mali_dma_addr mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+static mali_io_address mali_empty_page_directory_virt = NULL;
+
+
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+ /* allocate the helper pages */
+ mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
+ if (0 == mali_empty_page_directory_phys) {
+ MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_directory_mapping,
+ &mali_page_fault_flush_page_table,
+ &mali_page_fault_flush_page_table_mapping,
+ &mali_page_fault_flush_data_page,
+ &mali_page_fault_flush_data_page_mapping)) {
+ MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
+ mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ mali_empty_page_directory_virt = NULL;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mmu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+ /* Free global helper pages */
+ mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ mali_empty_page_directory_virt = NULL;
+
+ /* Free the page fault flush pages */
+ mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_directory_mapping,
+ &mali_page_fault_flush_page_table,
+ &mali_page_fault_flush_page_table_mapping,
+ &mali_page_fault_flush_data_page,
+ &mali_page_fault_flush_data_page_mapping);
+}
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+ struct mali_mmu_core *mmu = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+ mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
+ if (NULL != mmu) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
+ if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
+ if (is_virtual) {
+ /* Skip reset and IRQ setup for virtual MMU */
+ return mmu;
+ }
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ mmu->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_mmu,
+ group,
+ mali_mmu_probe_trigger,
+ mali_mmu_probe_ack,
+ mmu,
+ resource->description);
+ if (NULL != mmu->irq) {
+ return mmu;
+ } else {
+ MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+ }
+ }
+ mali_group_remove_mmu_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
+ }
+ mali_hw_core_delete(&mmu->hw_core);
+ }
+
+ _mali_osk_free(mmu);
+ } else {
+ MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+ }
+
+ return NULL;
+}
+
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+ if (NULL != mmu->irq) {
+ _mali_osk_irq_term(mmu->irq);
+ }
+
+ mali_hw_core_delete(&mmu->hw_core);
+ _mali_osk_free(mmu);
+}
+
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+ int i;
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to stall
+ * @return MALI_TRUE if the HW stall was successfully engaged, otherwise MALI_FALSE (request timed out)
+ */
+static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+ int i;
+ u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
+ return MALI_TRUE;
+ }
+
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+ return MALI_FALSE;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ break;
+ }
+ if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
+ break;
+ }
+ if (0 == (mmu_status & (MALI_MMU_STATUS_BIT_PAGING_ENABLED))) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return MALI_FALSE;
+ }
+
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
+ return MALI_FALSE;
+ }
+
+ return MALI_TRUE;
+}
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to disable the stall on
+ */
+static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+ int i;
+ u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+ return;
+ }
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+ return;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
+ break;
+ }
+ if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ break;
+ }
+ if (0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+ int i;
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+ MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ stall_success = mali_mmu_enable_stall(mmu);
+ if (!stall_success) {
+ err = _MALI_OSK_ERR_BUSY;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* no session is active, so just activate the empty page directory */
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
+ mali_mmu_enable_paging(mmu);
+ err = _MALI_OSK_ERR_OK;
+ }
+ mali_mmu_disable_stall(mmu);
+
+ return err;
+}
+
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success = mali_mmu_enable_stall(mmu);
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+ if (MALI_FALSE == stall_success) {
+ /* MALI_FALSE means the MMU is in page fault state, in which disabling the stall is not possible */
+ return MALI_FALSE;
+ }
+
+ mali_mmu_disable_stall(mmu);
+ return MALI_TRUE;
+}
+
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+ /* The MMU must be in stalled or page fault mode for this write to work */
+ MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+ & (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+ MALI_DEBUG_ASSERT(stall_success);
+ MALI_IGNORE(stall_success);
+ mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+
+ /* This function can only be called when the core is idle, so the stall request cannot fail. */
+ MALI_DEBUG_ASSERT(stall_success);
+ MALI_IGNORE(stall_success);
+
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+ stall_success = mali_mmu_enable_stall(mmu);
+ /* The stall request is expected to fail here, since the MMU may be in page fault mode when this is called */
+ mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+ if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
+}
+
+/* Called during IRQ probing, when we want the MMU to raise an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Called during IRQ probing, when the MMU should acknowledge an interrupt from the HW */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ u32 int_stat;
+
+ int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+ }
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+ }
+
+ if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/gpu/mali/mali/common/mali_mmu.h b/drivers/gpu/mali/mali/common/mali_mmu.h
new file mode 100644
index 00000000..f3040da2
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_mmu.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x0010, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt {
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+typedef enum mali_mmu_status_bits {
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+ MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct.
+ * Used to track an MMU unit in the system.
+ * Contains information about the mapping of the registers.
+ */
+struct mali_mmu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_mmu_page_directory.c b/drivers/gpu/mali/mali/common/mali_mmu_page_directory.c
new file mode 100644
index 00000000..3e0d9d40
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_mmu_page_directory.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+u32 mali_allocate_empty_page(mali_io_address *virt_addr)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+ mali_dma_addr address;
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
+ /* Allocation failed */
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_mmu_release_table_page(address, mapping);
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
+ return 0;
+ }
+
+ *virt_addr = mapping;
+ return address;
+}
+
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
+{
+ if (MALI_INVALID_PAGE != address) {
+ mali_mmu_release_table_page(address, virt_addr);
+ }
+}
+
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_mmu_get_table_page(data_page, data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_table, page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ fill_page(*data_page_mapping, 0);
+ fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
+ fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ }
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+ if (MALI_INVALID_PAGE != *page_directory) {
+ mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
+ *page_directory = MALI_INVALID_PAGE;
+ *page_directory_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *page_table) {
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ *page_table_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *data_page) {
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ *data_page_mapping = NULL;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
+ _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
+ }
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ _mali_osk_errcode_t err;
+ mali_io_address pde_mapping;
+ mali_dma_addr pde_phys;
+ int i;
+
+ if (last_pde < first_pde)
+ return _MALI_OSK_ERR_INVALID_ARGS;
+
+ for (i = first_pde; i <= last_pde; i++) {
+ if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+ /* Page table not present */
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+ return err;
+ }
+ pagedir->page_entries_mapped[i] = pde_mapping;
+
+ /* Update PDE, mark as present */
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
+ pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ pagedir->page_entries_usage_count[i] = 1;
+ } else {
+ pagedir->page_entries_usage_count[i]++;
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+ int i;
+ const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+ for (i = first_pte; i <= last_pte; i++) {
+ _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+ }
+}
+
+static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+ return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ u32 left = size;
+ int i;
+ mali_bool pd_changed = MALI_FALSE;
+ u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+ u32 num_pages_inv = 0;
+ mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
+
+ /* For all page directory entries in range. */
+ for (i = first_pde; i <= last_pde; i++) {
+ u32 size_in_pde, offset;
+
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+ /* Offset into page table, 0 if mali_address is 4MiB aligned */
+ offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+ if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
+ size_in_pde = left;
+ } else {
+ size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+ }
+
+ pagedir->page_entries_usage_count[i]--;
+
+ /* If entire page table is unused, free it */
+ if (0 == pagedir->page_entries_usage_count[i]) {
+ u32 page_phys;
+ void *page_virt;
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+
+ page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
+ page_virt = pagedir->page_entries_mapped[i];
+ pagedir->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+
+ mali_mmu_release_table_page(page_phys, page_virt);
+ pd_changed = MALI_TRUE;
+ } else {
+ MALI_DEBUG_ASSERT(num_pages_inv < 2);
+ if (num_pages_inv < 2) {
+ pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+
+ /* If part of the page table is still in use, zero the relevant PTEs */
+ mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+ }
+
+ left -= size_in_pde;
+ mali_address += size_in_pde;
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* Invalidate the modified pages in the L2 caches */
+ if (MALI_TRUE == pd_changed) {
+ MALI_DEBUG_ASSERT(num_pages_inv < 3);
+ if (num_pages_inv < 3) {
+ pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+ }
+
+ if (invalidate_all) {
+ mali_l2_cache_invalidate_all();
+ } else {
+ mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
+ }
+
+ MALI_SUCCESS;
+}
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+ struct mali_page_directory *pagedir;
+ _mali_osk_errcode_t err;
+ mali_dma_addr phys;
+
+ pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+ if (NULL == pagedir) {
+ return NULL;
+ }
+
+ err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
+ if (_MALI_OSK_ERR_OK != err) {
+ _mali_osk_free(pagedir);
+ return NULL;
+ }
+
+ pagedir->page_directory = (u32)phys;
+
+ /* Zero page directory */
+ fill_page(pagedir->page_directory_mapped, 0);
+
+ return pagedir;
+}
+
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+ const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+ int i;
+
+ /* Free referenced page tables and zero PDEs. */
+ for (i = 0; i < num_page_table_entries; i++) {
+ if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
+ pagedir->page_directory_mapped,
+ sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+ mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+ mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* Free the page directory page. */
+ mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
+
+ _mali_osk_free(pagedir);
+}
+
+
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits)
+{
+ u32 end_address = mali_address + size;
+ u32 mali_phys = (u32)phys_address;
+
+ /* Map physical pages into MMU page tables */
+ for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+ MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+ mali_phys | permission_bits);
+ }
+}
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
+{
+#if defined(DEBUG)
+ u32 pde_index, pte_index;
+ u32 pde, pte;
+
+ pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
+ pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
+
+
+ pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ pde_index * sizeof(u32));
+
+
+ if (pde & MALI_MMU_FLAGS_PRESENT) {
+ u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
+
+ pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
+ pte_index * sizeof(u32));
+
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
+ "\t\tPTE: %08x, page %08x is %s\n",
+ fault_addr, pte_addr, pte,
+ MALI_MMU_ENTRY_ADDRESS(pte),
+ pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
+ } else {
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
+ fault_addr, pde));
+ }
+#else
+ MALI_IGNORE(pagedir);
+ MALI_IGNORE(fault_addr);
+#endif
+}
+
+/* For instrumented */
+struct dump_info {
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+ if (NULL != info) {
+ info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
+
+ if (NULL != info->buffer) {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32) * 2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
+{
+ if (NULL != info) {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer) {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != pagedir->page_directory_mapped) {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+ );
+
+ for (i = 0; i < 1024; i++) {
+ if (NULL != pagedir->page_entries_mapped[i]) {
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_entries_mapped[i],
+ _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+ "set the page directory address", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap cache", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ info.buffer_left = args->size;
+ info.buffer = (u32 *)(uintptr_t)args->buffer;
+
+ args->register_writes = (uintptr_t)info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+ args->page_table_dump = (uintptr_t)info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_mmu_page_directory.h b/drivers/gpu/mali/mali/common/mali_mmu_page_directory.h
new file mode 100644
index 00000000..a53fee96
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_mmu_page_directory.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/**
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from a PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
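+
+/*
+ * Worked example (illustrative): for the Mali virtual address 0x12345678,
+ * MALI_MMU_PDE_ENTRY(0x12345678) == 0x048 (bits 31..22),
+ * MALI_MMU_PTE_ENTRY(0x12345678) == 0x345 (bits 21..12),
+ * and bits 11..0 (here 0x678) are the byte offset within the 4 KiB page.
+ * For an entry value of 0x0ABCD401, MALI_MMU_ENTRY_ADDRESS() yields
+ * 0x0ABCD400, while the low bits (0x001 == MALI_MMU_FLAGS_PRESENT) carry
+ * the entry flags.
+ */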
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * MMU page directory entry (PDE) and page table entry (PTE) flags
+ */
+typedef enum mali_mmu_entry_flags {
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8,
+ MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10,
+ MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20,
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40,
+ MALI_MMU_FLAGS_READ_CACHEABLE = 0x80,
+ MALI_MMU_FLAGS_READ_ALLOCATE = 0x100,
+ MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION | \
+ MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+ MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+ MALI_MMU_FLAGS_READ_CACHEABLE | \
+ MALI_MMU_FLAGS_READ_ALLOCATE )
+
+#define MALI_MMU_FLAGS_DEFAULT ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION )
+
+
+struct mali_page_directory {
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+ mali_io_address page_directory_mapped; /**< Pointer to the version of the page directory mapped into the kernel's address space */
+
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables of this page directory, mapped into the kernel's address space */
+ u32 page_entries_usage_count[1024]; /**< Tracks the usage count of each page table page, so it can be released when the last reference is removed */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits);
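+
+/*
+ * Sketch of how the calls above combine (illustrative; 'pagedir',
+ * 'buffer_dma_addr' and the addresses are hypothetical example values):
+ *
+ * u32 mali_addr = 0x02000000;
+ * u32 size = 16 * MALI_MMU_PAGE_SIZE;
+ *
+ * First make sure page tables exist for the virtual range:
+ *
+ * if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(pagedir, mali_addr, size))
+ * return _MALI_OSK_ERR_NOMEM;
+ *
+ * Then point the PTEs at the physical pages:
+ *
+ * mali_mmu_pagedir_update(pagedir, mali_addr, buffer_dma_addr, size,
+ * MALI_MMU_FLAGS_DEFAULT);
+ *
+ * A later mali_mmu_pagedir_unmap(pagedir, mali_addr, size) drops the page
+ * table references taken by mali_mmu_pagedir_map().
+ */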
+
+u32 mali_allocate_empty_page(mali_io_address *virt_addr);
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr);
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_osk.h b/drivers/gpu/mali/mali/common/mali_osk.h
new file mode 100644
index 00000000..d5c96517
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_osk.h
@@ -0,0 +1,1357 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#include "mali_osk_types.h"
+#include "mali_osk_specific.h" /* include any per-os specifics */
+#include "mali_osk_locks.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid());
+
+/* _mali_osk_lock_get_owner(), used by the assert macro above, returns a
+ * lock's owner (thread id) when debugging is enabled.
+ */
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * when ptr is a pointer to the given member of an object of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
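+
+/*
+ * Usage sketch (illustrative): recover a wrapper structure from a pointer to
+ * one of its embedded members, e.g. the mali_hw_core embedded in an MMU core:
+ *
+ * struct mali_mmu_core *mmu =
+ * _MALI_OSK_CONTAINER_OF(core, struct mali_mmu_core, hw_core);
+ *
+ * where 'core' is a struct mali_hw_core pointer known to live inside a
+ * struct mali_mmu_core.
+ */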
+
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief A high priority version of \a _mali_osk_wq_create_work()
+ *
+ * Creates a work object which can be scheduled in the high priority work queue.
+ *
+ * This is unfortunately needed to get low latency scheduling of the Mali cores. Normally we would
+ * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map
+ * and unmap shared memory when a job is connected to external fences (timelines). And this requires
+ * taking a mutex.
+ *
+ * We also signal a lot of other (low priority) work as part of a job being finished, and if we
+ * don't give this Mali scheduling thread high priority, we see that the CPU scheduler often runs
+ * other things instead of starting the next GPU job when the GPU is idle. So setting the GPU
+ * scheduler thread to high priority gives a visibly more responsive system.
+ *
+ * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri()
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the high priority work handler
+ *
+ * Function is the same as \a _mali_osk_wq_schedule_work() with the only
+ * difference that it runs in a high (real time) priority on the system.
+ *
+ * Should only be used as a substitute for doing the same work in interrupts.
+ *
+ * This is allowed to sleep, but the work should be small since it will block
+ * all other applications.
+ */
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work);
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
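+
+/*
+ * Typical lifecycle sketch for the work queue API above (illustrative;
+ * 'my_handler' and 'my_data' are hypothetical):
+ *
+ * _mali_osk_wq_work_t *work = _mali_osk_wq_create_work(my_handler, my_data);
+ * if (NULL == work)
+ * return _MALI_OSK_ERR_NOMEM;
+ *
+ * _mali_osk_wq_schedule_work(work);
+ *
+ * my_handler(my_data) then runs later, in process context. On teardown,
+ * _mali_osk_wq_delete_work(work) flushes the queue so the handler cannot
+ * run after the deletion.
+ */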
+
+/** @brief Create work in the delayed work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, a timer is started, and \a handler will be called with
+ * \a data as the argument when the timer expires.
+ *
+ * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush()
+ * when no longer needed.
+ */
+_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work without waiting for it to finish
+ *
+ * Note that the \a work callback function may still be running on return from
+ * _mali_osk_wq_delayed_cancel_work_async().
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work and wait for it to finish
+ *
+ * When this function returns, the \a work was either cancelled or it finished running.
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Put \a work task in global workqueue after delay
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ *
+ * If \a work was already on a queue, this function will return without doing anything
+ *
+ * @param work job to be done
+ * @param delay number of jiffies to wait or 0 for immediate execution
+ */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay);
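+
+/*
+ * Delayed work sketch (illustrative; 'timeout_handler' and 'my_data' are
+ * hypothetical):
+ *
+ * _mali_osk_wq_delayed_work_t *dwork =
+ * _mali_osk_wq_delayed_create_work(timeout_handler, my_data);
+ *
+ * Schedule it to run roughly 100 jiffies from now:
+ *
+ * _mali_osk_wq_delayed_schedule_work(dwork, 100);
+ *
+ * On teardown, cancel and wait before freeing, so the handler cannot run
+ * after the work object is gone:
+ *
+ * _mali_osk_wq_delayed_cancel_work_sync(dwork);
+ * _mali_osk_wq_delayed_delete_work_nonflush(dwork);
+ */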
+
+/** @} */ /* end group _mali_osk_wq */
+
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a int_data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will
+ * receive \a probe_data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to an ISR handler for
+ * the resource
+ * @param int_data resource-specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description);
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This disables the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term(_mali_osk_irq_t *irq);
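+
+/* Usage sketch (illustrative only; MY_IRQ_NUM, my_upper_handler and my_core
+ * are hypothetical):
+ *
+ *     _mali_osk_irq_t *irq;
+ *     irq = _mali_osk_irq_init(MY_IRQ_NUM, my_upper_handler, my_core,
+ *                              NULL, NULL, NULL, "my core irq");
+ *     if (NULL == irq) return _MALI_OSK_ERR_FAULT;
+ *     (... on termination, after flushing any deferred work:)
+ *     _mali_osk_irq_term(irq);
+ */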
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom);
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom);
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom);
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom);
+
+/** @brief Assign a new value to the atomic counter, and return the old value
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the new value to assign to the atomic counter
+ * @return the old value of the atomic counter
+ */
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val);
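+
+/* Usage sketch (illustrative only; the counter tracks a hypothetical number
+ * of in-flight jobs):
+ *
+ *     _mali_osk_atomic_t jobs;
+ *     if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&jobs, 0)) return;
+ *     _mali_osk_atomic_inc(&jobs);
+ *     if (0 == _mali_osk_atomic_dec_return(&jobs)) {
+ *             (the last job has now completed)
+ *     }
+ *     _mali_osk_atomic_term(&jobs);
+ */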
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc(u32 n, u32 size);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free(void *ptr);
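+
+/* Usage sketch (illustrative only; struct my_record is hypothetical):
+ *
+ *     struct my_record *table;
+ *     table = _mali_osk_calloc(16, sizeof(struct my_record));
+ *     if (NULL == table) return _MALI_OSK_ERR_NOMEM;
+ *     (table[0] .. table[15] are zero-initialized and suitably aligned)
+ *     _mali_osk_free(table);
+ */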
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but does support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree(void *ptr);
+
+/** @brief Copies memory.
+ *
+ * Copies \a len bytes from the buffer pointed to by the parameter \a src
+ * directly to the buffer pointed to by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy(void *dst, const void *src, u32 len);
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value.
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset(void *s, u32 c, u32 n);
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSs bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * avoid triggering the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated);
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier(void);
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation, which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier(void);
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion(u32 phys, u32 size, const char *description);
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size);
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion(u32 phys, u32 size, const char *description);
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion(u32 phys, u32 size);
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address mapping, u32 offset, u32 val);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val);
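+
+/* Usage sketch (illustrative only; MY_REGS_PHYS and MY_CTRL_OFFSET are
+ * hypothetical values that would normally come from the platform resources):
+ *
+ *     mali_io_address regs;
+ *     u32 ctrl;
+ *     regs = _mali_osk_mem_mapioregion(MY_REGS_PHYS, 0x1000, "my regs");
+ *     if (NULL == regs) return _MALI_OSK_ERR_NOMEM;
+ *     ctrl = _mali_osk_mem_ioread32(regs, MY_CTRL_OFFSET);
+ *     _mali_osk_mem_iowrite32(regs, MY_CTRL_OFFSET, ctrl | 1);
+ *     _mali_osk_mem_unmapioregion(MY_REGS_PHYS, 0x1000, regs);
+ */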
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall(void);
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSs do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size);
+
+/** @brief Safely copy as much data as possible from src to dest
+ *
+ * Do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size);
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete(_mali_osk_notification_t *object);
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for
+ * delivery to user space, and from which queued messages can be retrieved.
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void);
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue);
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
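+
+/* Usage sketch (illustrative only; MY_NOTIFICATION_TYPE, struct my_result and
+ * queue are hypothetical):
+ *
+ * Sender side:
+ *     _mali_osk_notification_t *n;
+ *     n = _mali_osk_notification_create(MY_NOTIFICATION_TYPE,
+ *                                       sizeof(struct my_result));
+ *     if (NULL != n) {
+ *             (fill in n->result_buffer, then:)
+ *             _mali_osk_notification_queue_send(queue, n);
+ *     }
+ *
+ * Receiver side:
+ *     _mali_osk_notification_t *n = NULL;
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_receive(queue, &n)) {
+ *             (process n->result_buffer, then:)
+ *             _mali_osk_notification_delete(n);
+ *     }
+ */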
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which is 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire is 0, the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em relative time in ticks, from the time of
+ * the call, at which this timer should trigger.
+ *
+ */
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del(_mali_osk_timer_t *tim);
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term(_mali_osk_timer_t *tim);
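+
+/* Usage sketch (illustrative only; my_timeout_cb and my_data are
+ * hypothetical):
+ *
+ *     _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ *     if (NULL == tim) return _MALI_OSK_ERR_NOMEM;
+ *     _mali_osk_timer_setcallback(tim, my_timeout_cb, my_data);
+ *     _mali_osk_timer_add(tim, _mali_osk_time_mstoticks(500));
+ *     (... on teardown:)
+ *     _mali_osk_timer_del(tim);
+ *     _mali_osk_timer_term(tim);
+ */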
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time uses the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka the first time, in ticks
+ * @param tickb the second time, in ticks, to compare \a ticka against
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after(u32 ticka, u32 tickb);
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks(u32 ms);
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms(u32 ticks);
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount(void);
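+
+/* Usage sketch (illustrative only): a rollover-safe 100 ms deadline test,
+ * assuming a hypothetical my_condition() to poll:
+ *
+ *     u32 deadline = _mali_osk_time_tickcount() +
+ *                    _mali_osk_time_mstoticks(100);
+ *     while (!my_condition()) {
+ *             if (_mali_osk_time_after(_mali_osk_time_tickcount(), deadline)) {
+ *                     return _MALI_OSK_ERR_TIMEOUT;
+ *             }
+ *     }
+ */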
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay(u32 usecs);
+
+/** @brief Return the time in nanoseconds, since any given reference.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns(void);
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz(u32 val);
+
+/** @brief find last (most-significant) bit set
+ *
+ * @param val 32-bit word to find the last set bit in
+ * @return the position of the last set bit.
+ */
+u32 _mali_osk_fls(u32 val);
+
+/** @} */ /* end group _mali_osk_math */
+
+/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ * @param timeout timeout in ms
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true. Will return if time
+ * exceeds timeout.
+ */
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout);
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue);
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue);
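+
+/* Usage sketch (illustrative only; struct my_job, its "finished" flag, job
+ * and queue are hypothetical):
+ *
+ *     static mali_bool my_job_done(void *data)
+ *     {
+ *             return ((struct my_job *)data)->finished;
+ *     }
+ *
+ * Waiting side:
+ *     _mali_osk_wait_queue_wait_event(queue, my_job_done, job);
+ *
+ * Completing side (after setting job->finished = MALI_TRUE):
+ *     _mali_osk_wait_queue_wake_up(queue);
+ */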
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg(const char *fmt, ...);
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+
+/** @brief Print fmt into print_ctx.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param print_ctx a pointer to the result file buffer
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...);
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @brief Enable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_enable(void);
+
+/** @brief Disable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_disable(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * When this function returns successfully, Mali is powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When the reference count reaches zero, the cores can be powered off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ */
+void _mali_osk_pm_dev_ref_dec(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * Will leave the cores powered off if they are already powered off.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ *
+ * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When the reference count reaches zero, the cores can be powered off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+#error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+#error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_osk_bitops.h b/drivers/gpu/mali/mali/common/mali_osk_bitops.h
new file mode 100644
index 00000000..525b944e
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_osk_bitops.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value)
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+ /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32) - inverted; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+ /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz(isolated);
+
+ return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_clear_bit(nr, addr);
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_set_bit(nr, addr);
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ return _mali_internal_test_bit(nr, *addr);
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit)
+{
+ u32 total;
+
+ for (total = 0; total < maxbit; total += 32, ++addr) {
+ int result;
+ result = _mali_internal_find_first_zero_bit(*addr);
+
+ /* non-negative signifies the bit was found */
+ if (result >= 0) {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if (total >= maxbit) {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
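+
+/* Usage sketch (illustrative only): allocating a slot index from a
+ * hypothetical 64-entry bitmap of used slots:
+ *
+ *     u32 used[2] = { 0, 0 };
+ *     u32 idx = _mali_osk_find_first_zero_bit(used, 64);
+ *     if (idx < 64) {
+ *             _mali_osk_set_nonatomic_bit(idx, used);
+ *             (... and later, to release the slot:)
+ *             _mali_osk_clear_nonatomic_bit(idx, used);
+ *     }
+ */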
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_osk_list.h b/drivers/gpu/mali/mali/common/mali_osk_list.h
new file mode 100644
index 00000000..2cd190e7
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_osk_list.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
+{
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+ if (!_mali_osk_list_empty(old_list)) {
+ new_list->next = old_list->next;
+ new_list->prev = old_list->prev;
+ new_list->next->prev = new_list;
+ new_list->prev->next = new_list;
+ old_list->next = old_list;
+ old_list->prev = old_list;
+ }
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
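+
+/* Usage sketch (illustrative only; struct my_job and wanted_id are
+ * hypothetical):
+ *
+ *     struct my_job {
+ *             _mali_osk_list_t list;
+ *             u32 id;
+ *     };
+ *
+ *     _MALI_OSK_LIST_HEAD_STATIC_INIT(my_queue);
+ *
+ *     struct my_job *job, *tmp;
+ *     _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &my_queue, struct my_job, list) {
+ *             if (wanted_id == job->id) {
+ *                     _mali_osk_list_delinit(&job->list);
+ *                     break;
+ *             }
+ *     }
+ */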
+
+/** @brief Enumerate a list in reverse order safely
+ *
+ * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that
+ * entries are enumerated in reverse order.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member))
+
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_osk_mali.h b/drivers/gpu/mali/mali/common/mali_osk_mali.h
new file mode 100644
index 00000000..24356c0f
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_osk_mali.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer that is specific to the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <linux/mali/mali_utgard.h>
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+typedef struct mali_gpu_device_data _mali_osk_device_data;
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return The base address of the Mali GPU component with the lowest address, or 0 if no resources are found.
+ */
+u32 _mali_osk_resource_base_address(void);
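+
+/* Example (sketch): locating the PMU resource before creating the PMU core.
+ * 'pmu_offset' stands in for the PMU's offset from the GPU base address and
+ * is platform specific; mali_pmu_create() is declared in mali_pmu.h.
+ *
+ *   _mali_osk_resource_t res;
+ *   u32 base = _mali_osk_resource_base_address();
+ *   if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(base + pmu_offset, &res)) {
+ *       struct mali_pmu_core *pmu = mali_pmu_create(&res);
+ *   }
+ */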
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
+
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_osk_profiling.h b/drivers/gpu/mali/mali/common/mali_osk_profiling.h
new file mode 100644
index 00000000..6463177a
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_osk_profiling.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input; the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
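+
+/* Example (sketch): recording a single software event with no payload. The
+ * MALI_PROFILING_EVENT_* constants are assumed to come from
+ * mali_profiling_events.h, included above.
+ *
+ *   _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ *                                 MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
+ *                                 0, 0, 0, 0, 0);
+ */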
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start at 0 and increment until this function fails, in order to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/mali/mali/common/mali_osk_types.h b/drivers/gpu/mali/mali/common/mali_osk_types.h
new file mode 100644
index 00000000..cb757e87
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_osk_types.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_types.h
+ * Defines types of the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_TYPES_H__
+#define __MALI_OSK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+typedef unsigned char u8;
+typedef signed char s8;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned long long u64;
+#define BITS_PER_LONG (sizeof(long)*8)
+#else
+/* Ensure Linux types u32, etc. are defined */
+#include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+#define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+#define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface takes certain error codes. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum {
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptible mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
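+
+/* Illustrative sketch of the kind of translation the Linux OSK layer has to
+ * perform between native errno values (<linux/errno.h>) and
+ * _mali_osk_errcode_t; the actual mapping lives in the OS-specific OSK
+ * implementation, not here.
+ *
+ *   static _mali_osk_errcode_t errno_to_osk(int err)
+ *   {
+ *       switch (err) {
+ *       case 0:          return _MALI_OSK_ERR_OK;
+ *       case -ENOMEM:    return _MALI_OSK_ERR_NOMEM;
+ *       case -ETIMEDOUT: return _MALI_OSK_ERR_TIMEOUT;
+ *       default:         return _MALI_OSK_ERR_FAULT;
+ *       }
+ *   }
+ */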
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t;
+typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)(void *arg);
+
+/** @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)(void *arg);
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg);
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions on what can be done in this
+ * context.
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg);
+
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct {
+ union {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
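+
+/* Usage sketch, assuming the _mali_osk_atomic_* accessors declared in
+ * mali_osk.h; per the note above, never touch u.val or u.obj directly.
+ *
+ *   _mali_osk_atomic_t refcount;
+ *   _mali_osk_atomic_init(&refcount, 1);
+ *   _mali_osk_atomic_inc(&refcount);
+ *   if (0 == _mali_osk_atomic_dec_return(&refcount)) {
+ *       // last reference dropped (hypothetical cleanup goes here)
+ *   }
+ */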
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * - Holding more than one lock of the same order at the same time is not
+ * allowed.
+ * - Taking a lock of a lower order than the highest-order lock currently held
+ * is not allowed.
+ *
+ */
+typedef enum {
+ /* || Locks || */
+ /* || must be || */
+ /* _||_ taken in _||_ */
+ /* \ / this \ / */
+ /* \/ order! \/ */
+
+ _MALI_OSK_LOCK_ORDER_FIRST = 0,
+
+ _MALI_OSK_LOCK_ORDER_SESSIONS,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+ _MALI_OSK_LOCK_ORDER_MEM_INFO,
+ _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+ _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
+ _MALI_OSK_LOCK_ORDER_GROUP,
+ _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+ _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+ _MALI_OSK_LOCK_ORDER_DMA_COMMAND,
+ _MALI_OSK_LOCK_ORDER_PROFILING,
+ _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION,
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
+ _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+ _MALI_OSK_LOCK_ORDER_PM_DOMAIN,
+ _MALI_OSK_LOCK_ORDER_PMU,
+
+ _MALI_OSK_LOCK_ORDER_LAST,
+} _mali_osk_lock_order_t;
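+
+/* Example: a mutex created with order _MALI_OSK_LOCK_ORDER_SCHEDULER may be
+ * held while taking a lock of higher order such as
+ * _MALI_OSK_LOCK_ORDER_PM_CORE_STATE, but not the other way around. A sketch,
+ * assuming the mutex constructor declared in mali_osk.h:
+ *
+ *   _mali_osk_mutex_t *sched_lock =
+ *       _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ *                            _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ */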
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * - Any lock can use the order parameter.
+ */
+typedef enum {
+ _MALI_OSK_LOCKFLAG_UNORDERED = 0x1, /**< Indicate that the order of this lock should not be checked */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x2,
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks through
+ * the functions _mali_osk_mutex_rw_init/wait/signal/term(). In this case, the
+ * RO mode can be used to allow multiple concurrent readers, but no writers.
+ * The RW mode is used for writers, and so will wait for all readers to
+ * release the lock (if any are present). Further readers and writers will
+ * wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of the mode supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * calling the functions _mali_osk_mutex_rw_wait/signal().
+ *
+ */
+typedef enum {
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private types for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t;
+typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t;
+typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t;
+typedef struct _mali_osk_mutex_s _mali_osk_mutex_t;
+typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address *mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
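+
+/* Example: splitting a physical address into its page start and in-page
+ * offset using the macros above.
+ *
+ *   u32 phys = 0x10001234;
+ *   u32 page_start = phys & _MALI_OSK_CPU_PAGE_MASK;  // 0x10001000
+ *   u32 offset = phys & ~_MALI_OSK_CPU_PAGE_MASK;     // 0x234
+ */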
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum {
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct {
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void *result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void *arg);
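+
+/* Sketch of a timer callback deferring its heavy work to a work queue;
+ * 'my_work' is a hypothetical _mali_osk_wq_work_t created elsewhere with
+ * _mali_osk_wq_create_work().
+ *
+ *   static void my_timer_callback(void *arg)
+ *   {
+ *       _mali_osk_wq_schedule_work(my_work); // run the bottom half outside IRQ context
+ *   }
+ */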
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s {
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+/** @} */ /* end group _mali_osk_list */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource {
+ const char *description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+/** @} */ /* end group _mali_osk_wait_queue */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+/** @brief Mali print ctx type which uses seq_file
+ */
+typedef struct seq_file _mali_osk_print_ctx;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_pm.c b/drivers/gpu/mali/mali/common/mali_pm.c
new file mode 100644
index 00000000..57c97908
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pm.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+static mali_bool mali_power_on = MALI_FALSE;
+
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+ _mali_osk_pm_dev_enable();
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_terminate(void)
+{
+ mali_pm_domain_terminate();
+ _mali_osk_pm_dev_disable();
+}
+
+/* Reset GPU after power up */
+static void mali_pm_reset_gpu(void)
+{
+ /* Reset all L2 caches */
+ mali_l2_cache_reset_all();
+
+ /* Reset all groups */
+ mali_scheduler_reset_all_groups();
+}
+
+void mali_pm_os_suspend(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+ mali_gp_scheduler_suspend();
+ mali_pp_scheduler_suspend();
+ mali_utilization_suspend();
+ mali_group_power_off(MALI_TRUE);
+ mali_power_on = MALI_FALSE;
+}
+
+void mali_pm_os_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ mali_bool do_reset = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+ if (MALI_TRUE != mali_power_on) {
+ do_reset = MALI_TRUE;
+ }
+
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+
+ mali_power_on = MALI_TRUE;
+ _mali_osk_write_mem_barrier();
+
+ if (do_reset) {
+ mali_pm_reset_gpu();
+ mali_group_power_on();
+ }
+
+ mali_gp_scheduler_resume();
+ mali_pp_scheduler_resume();
+}
+
+void mali_pm_runtime_suspend(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+ mali_group_power_off(MALI_TRUE);
+ mali_power_on = MALI_FALSE;
+}
+
+void mali_pm_runtime_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ mali_bool do_reset = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+
+ if (MALI_TRUE != mali_power_on) {
+ do_reset = MALI_TRUE;
+ }
+
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+
+ mali_power_on = MALI_TRUE;
+ _mali_osk_write_mem_barrier();
+
+ if (do_reset) {
+ mali_pm_reset_gpu();
+ mali_group_power_on();
+ }
+}
+
+void mali_pm_set_power_is_on(void)
+{
+ mali_power_on = MALI_TRUE;
+}
+
+mali_bool mali_pm_is_power_on(void)
+{
+ return mali_power_on;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_pm.h b/drivers/gpu/mali/mali/common/mali_pm.h
new file mode 100644
index 00000000..4f4d0361
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pm.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_pm_initialize(void);
+void mali_pm_terminate(void);
+
+/* Callback functions registered for the runtime PMM system */
+void mali_pm_os_suspend(void);
+void mali_pm_os_resume(void);
+void mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+void mali_pm_set_power_is_on(void);
+mali_bool mali_pm_is_power_on(void);
+
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_pm_domain.c b/drivers/gpu/mali/mali/common/mali_pm_domain.c
new file mode 100644
index 00000000..8d9d7ccd
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pm_domain.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = { NULL, };
+
+static void mali_pm_domain_lock(struct mali_pm_domain *domain)
+{
+ _mali_osk_spinlock_irq_lock(domain->lock);
+}
+
+static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
+{
+ _mali_osk_spinlock_irq_unlock(domain->lock);
+}
+
+MALI_STATIC_INLINE void mali_pm_domain_state_set(struct mali_pm_domain *domain, mali_pm_domain_state state)
+{
+ domain->state = state;
+}
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
+{
+ struct mali_pm_domain *domain = NULL;
+ u32 domain_id = 0;
+
+ domain = mali_pm_domain_get_from_mask(pmu_mask);
+ if (NULL != domain) return domain;
+
+ MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));
+
+ domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
+ if (NULL != domain) {
+ domain->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
+ if (NULL == domain->lock) {
+ _mali_osk_free(domain);
+ return NULL;
+ }
+
+ domain->state = MALI_PM_DOMAIN_ON;
+ domain->pmu_mask = pmu_mask;
+ domain->use_count = 0;
+ domain->group_list = NULL;
+ domain->group_count = 0;
+ domain->l2 = NULL;
+
+ domain_id = _mali_osk_fls(pmu_mask) - 1;
+ /* Verify the domain_id */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id);
+ /* Verify that only one bit in pmu_mask is set */
+ MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask);
+ mali_pm_domains[domain_id] = domain;
+
+ return domain;
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
+ }
+
+ return NULL;
+}
+
+void mali_pm_domain_delete(struct mali_pm_domain *domain)
+{
+ if (NULL == domain) {
+ return;
+ }
+ _mali_osk_spinlock_irq_term(domain->lock);
+
+ _mali_osk_free(domain);
+}
+
+void mali_pm_domain_terminate(void)
+{
+ int i;
+
+ /* Delete all domains */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ mali_pm_domain_delete(mali_pm_domains[i]);
+ }
+}
+
+void mali_pm_domain_add_group(u32 mask, struct mali_group *group)
+{
+ struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
+ struct mali_group *next;
+
+ if (NULL == domain) return;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ ++domain->group_count;
+ next = domain->group_list;
+
+ domain->group_list = group;
+
+ group->pm_domain_list = next;
+
+ mali_group_set_pm_domain(group, domain);
+
+ /* Get pm domain ref after mali_group_set_pm_domain */
+ mali_group_get_pm_domain_ref(group);
+}
+
+void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2)
+{
+ struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
+
+ if (NULL == domain) return;
+
+ MALI_DEBUG_ASSERT(NULL == domain->l2);
+ MALI_DEBUG_ASSERT(NULL != l2);
+
+ domain->l2 = l2;
+
+ mali_l2_cache_set_pm_domain(l2, domain);
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
+{
+ u32 id = 0;
+
+ if (0 == mask) return NULL;
+
+ id = _mali_osk_fls(mask) - 1;
+
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+ /* Verify that only one bit in mask is set */
+ MALI_DEBUG_ASSERT((1 << id) == mask);
+
+ return mali_pm_domains[id];
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id)
+{
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+
+ return mali_pm_domains[id];
+}
+
+void mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+{
+ if (NULL == domain) return;
+
+ mali_pm_domain_lock(domain);
+ ++domain->use_count;
+
+ if (MALI_PM_DOMAIN_ON != domain->state) {
+ /* Power on */
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(3, ("PM Domain: Powering on 0x%08x\n", domain->pmu_mask));
+
+ if (NULL != pmu) {
+ _mali_osk_errcode_t err;
+
+ err = mali_pmu_power_up(pmu, domain->pmu_mask);
+
+ if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
+ MALI_PRINT_ERROR(("PM Domain: Failed to power up PM domain 0x%08x\n",
+ domain->pmu_mask));
+ }
+ }
+ mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_ON);
+ } else {
+ MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(domain));
+ }
+
+ mali_pm_domain_unlock(domain);
+}
+
+void mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+ if (NULL == domain) return;
+
+ mali_pm_domain_lock(domain);
+ --domain->use_count;
+
+ if (0 == domain->use_count && MALI_PM_DOMAIN_OFF != domain->state) {
+ /* Power off */
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(3, ("PM Domain: Powering off 0x%08x\n", domain->pmu_mask));
+
+ mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_OFF);
+
+ if (NULL != pmu) {
+ _mali_osk_errcode_t err;
+
+ err = mali_pmu_power_down(pmu, domain->pmu_mask);
+
+ if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
+ MALI_PRINT_ERROR(("PM Domain: Failed to power down PM domain 0x%08x\n",
+ domain->pmu_mask));
+ }
+ }
+ }
+ mali_pm_domain_unlock(domain);
+}
+
+mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain)
+{
+ mali_bool is_powered = MALI_TRUE;
+
+ /* Take a reference without powering on */
+ if (NULL != domain) {
+ mali_pm_domain_lock(domain);
+ ++domain->use_count;
+
+ if (MALI_PM_DOMAIN_ON != domain->state) {
+ is_powered = MALI_FALSE;
+ }
+ mali_pm_domain_unlock(domain);
+ }
+
+ if (!_mali_osk_pm_dev_ref_add_no_power_on()) {
+ is_powered = MALI_FALSE;
+ }
+
+ return is_powered;
+}
+
+void mali_pm_domain_unlock_state(struct mali_pm_domain *domain)
+{
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+
+ if (NULL != domain) {
+ mali_pm_domain_ref_put(domain);
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_pm_domain.h b/drivers/gpu/mali/mali/common/mali_pm_domain.h
new file mode 100644
index 00000000..1915105d
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pm_domain.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_DOMAIN_H__
+#define __MALI_PM_DOMAIN_H__
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+#include "mali_pmu.h"
+
+typedef enum {
+ MALI_PM_DOMAIN_ON,
+ MALI_PM_DOMAIN_OFF,
+} mali_pm_domain_state;
+
+struct mali_pm_domain {
+ mali_pm_domain_state state;
+ _mali_osk_spinlock_irq_t *lock;
+
+ s32 use_count;
+
+ u32 pmu_mask;
+
+ int group_count;
+ struct mali_group *group_list;
+
+ struct mali_l2_cache_core *l2;
+};
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
+
+void mali_pm_domain_add_group(u32 mask, struct mali_group *group);
+
+void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2);
+void mali_pm_domain_delete(struct mali_pm_domain *domain);
+
+void mali_pm_domain_terminate(void);
+
+/** Get PM domain from domain ID
+ */
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
+
+/* Ref counting */
+void mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+void mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE struct mali_l2_cache_core *mali_pm_domain_l2_get(struct mali_pm_domain *domain)
+{
+ return domain->l2;
+}
+
+MALI_STATIC_INLINE mali_pm_domain_state mali_pm_domain_state_get(struct mali_pm_domain *domain)
+{
+ return domain->state;
+}
+
+mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain);
+void mali_pm_domain_unlock_state(struct mali_pm_domain *domain);
+
+#define MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) for ((group) = (domain)->group_list;\
+ NULL != (group); (group) = (group)->pm_domain_list)
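+
+/* Usage sketch: visit every group attached to a domain; the groups are
+ * chained through their pm_domain_list member, and do_something_with_group()
+ * is hypothetical.
+ *
+ *   struct mali_group *group;
+ *   MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) {
+ *       do_something_with_group(group);
+ *   }
+ */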
+
+#endif /* __MALI_PM_DOMAIN_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_pmu.c b/drivers/gpu/mali/mali/common/mali_pmu.c
new file mode 100644
index 00000000..d3f14728
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pmu.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm.h"
+#include "mali_osk_mali.h"
+
+u16 mali_pmu_global_domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {0};
+
+static u32 mali_pmu_detect_mask(void);
+
+/** @brief Mali in-built PMU hardware info; the PMU hardware tracks the power
+ * mask of the registered and active cores
+ */
+struct mali_pmu_core {
+ struct mali_hw_core hw_core;
+ _mali_osk_spinlock_t *lock;
+ u32 registered_cores_mask;
+ u32 active_cores_mask;
+ u32 switch_delay;
+};
+
+static struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /**< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /**< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /**< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /**< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /**< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /**< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /**< Switch delay register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /**< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+#define PMU_REG_VAL_IRQ 1
+
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
+{
+ struct mali_pmu_core *pmu;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+ pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+ if (NULL != pmu) {
+ pmu->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PMU);
+ if (NULL != pmu->lock) {
+ pmu->registered_cores_mask = mali_pmu_detect_mask();
+ pmu->active_cores_mask = pmu->registered_cores_mask;
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+ _mali_osk_errcode_t err;
+ _mali_osk_device_data data = { 0, };
+
+ err = _mali_osk_device_data_get(&data);
+ if (_MALI_OSK_ERR_OK == err) {
+ pmu->switch_delay = data.pmu_switch_delay;
+ mali_global_pmu_core = pmu;
+ return pmu;
+ }
+ mali_hw_core_delete(&pmu->hw_core);
+ }
+ _mali_osk_spinlock_term(pmu->lock);
+ }
+ _mali_osk_free(pmu);
+ }
+
+ return NULL;
+}
+
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
+
+ _mali_osk_spinlock_term(pmu->lock);
+ mali_hw_core_delete(&pmu->hw_core);
+ _mali_osk_free(pmu);
+ mali_global_pmu_core = NULL;
+}
+
+static void mali_pmu_lock(struct mali_pmu_core *pmu)
+{
+ _mali_osk_spinlock_lock(pmu->lock);
+}
+static void mali_pmu_unlock(struct mali_pmu_core *pmu)
+{
+ _mali_osk_spinlock_unlock(pmu->lock);
+}
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(struct mali_pmu_core *pmu)
+{
+ u32 rawstat;
+ u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+
+ MALI_DEBUG_ASSERT(pmu);
+
+ /* Wait for the command to complete */
+ do {
+ rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+ --timeout;
+ } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+
+ MALI_DEBUG_ASSERT(0 < timeout);
+ if (0 == timeout) {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_pmu_power_up_internal(struct mali_pmu_core *pmu, const u32 mask)
+{
+ u32 stat;
+ _mali_osk_errcode_t err;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ u32 current_domain;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
+ & PMU_REG_VAL_IRQ));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+ if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, mask);
+
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+#else
+ for (current_domain = 1; current_domain <= pmu->registered_cores_mask; current_domain <<= 1) {
+ if (current_domain & mask & stat) {
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, current_domain);
+
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+ }
+#endif
+
+#if defined(DEBUG)
+ /* Get power status of cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
+ MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
+#endif /* defined(DEBUG) */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_pmu_power_down_internal(struct mali_pmu_core *pmu, const u32 mask)
+{
+ u32 stat;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
+ & PMU_REG_VAL_IRQ));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+
+ if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
+
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+
+ /* Do not wait for interrupt on Mali-300/400 if all domains are powered off
+ * by our power down command, because the HW will simply not generate an
+ * interrupt in this case. */
+ if (mali_is_mali450() || pmu->registered_cores_mask != (mask | stat)) {
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ } else {
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+ }
+#if defined(DEBUG)
+ /* Get power status of cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
+#endif
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+ _mali_osk_errcode_t err;
+ u32 cores_off_mask, cores_on_mask, stat;
+
+ mali_pmu_lock(pmu);
+
+ /* Setup the desired defaults */
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+ /* Get power status of cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+
+ cores_off_mask = pmu->registered_cores_mask & ~(stat | pmu->active_cores_mask);
+ cores_on_mask = pmu->registered_cores_mask & (stat & pmu->active_cores_mask);
+
+ if (0 != cores_off_mask) {
+ err = mali_pmu_power_down_internal(pmu, cores_off_mask);
+ if (_MALI_OSK_ERR_OK != err) return err;
+ }
+
+ if (0 != cores_on_mask) {
+ err = mali_pmu_power_up_internal(pmu, cores_on_mask);
+ if (_MALI_OSK_ERR_OK != err) return err;
+ }
+
+#if defined(DEBUG)
+ {
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+
+ MALI_DEBUG_ASSERT(stat == (pmu->registered_cores_mask & ~pmu->active_cores_mask));
+ }
+#endif /* defined(DEBUG) */
+
+ mali_pmu_unlock(pmu);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ /* Make sure we have a valid power domain mask */
+ if (mask > pmu->registered_cores_mask) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ mali_pmu_lock(pmu);
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Power down (0x%08X)\n", mask));
+
+ pmu->active_cores_mask &= ~mask;
+
+ _mali_osk_pm_dev_ref_add_no_power_on();
+ if (!mali_pm_is_power_on()) {
+ /* Don't touch hardware if all of Mali is powered off. */
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+ mali_pmu_unlock(pmu);
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power down (0x%08X) since Mali is off\n", mask));
+
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ err = mali_pmu_power_down_internal(pmu, mask);
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+ mali_pmu_unlock(pmu);
+
+ return err;
+}
+
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ /* Make sure we have a valid power domain mask */
+ if (mask & ~pmu->registered_cores_mask) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ mali_pmu_lock(pmu);
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Power up (0x%08X)\n", mask));
+
+ pmu->active_cores_mask |= mask;
+
+ _mali_osk_pm_dev_ref_add_no_power_on();
+ if (!mali_pm_is_power_on()) {
+ /* Don't touch hardware if all of Mali is powered off. */
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+ mali_pmu_unlock(pmu);
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power up (0x%08X) since Mali is off\n", mask));
+
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ err = mali_pmu_power_up_internal(pmu, mask);
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+ mali_pmu_unlock(pmu);
+
+ return err;
+}
+
+_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ mali_pmu_lock(pmu);
+
+ /* Setup the desired defaults in case we were called before mali_pmu_reset() */
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+ err = mali_pmu_power_down_internal(pmu, pmu->registered_cores_mask);
+
+ mali_pmu_unlock(pmu);
+
+ return err;
+}
+
+_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ mali_pmu_lock(pmu);
+
+ /* Setup the desired defaults in case we were called before mali_pmu_reset() */
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+ err = mali_pmu_power_up_internal(pmu, pmu->active_cores_mask);
+
+ mali_pmu_unlock(pmu);
+ return err;
+}
+
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+ return mali_global_pmu_core;
+}
+
+static u32 mali_pmu_detect_mask(void)
+{
+ int dynamic_config_pp = 0;
+ int dynamic_config_l2 = 0;
+ int i = 0;
+ u32 mask = 0;
+
+ /* Collect the configured PM domain mask for the GP core, the PP cores and the L2 caches, and count how many PP and L2 domains are configured */
+ mask = mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX);
+
+ for (i = MALI_PP0_DOMAIN_INDEX; i <= MALI_PP7_DOMAIN_INDEX; i++) {
+ mask |= mali_pmu_get_domain_mask(i);
+
+ if (0x0 != mali_pmu_get_domain_mask(i)) {
+ dynamic_config_pp++;
+ }
+ }
+
+ for (i = MALI_L20_DOMAIN_INDEX; i <= MALI_L22_DOMAIN_INDEX; i++) {
+ mask |= mali_pmu_get_domain_mask(i);
+
+ if (0x0 != mali_pmu_get_domain_mask(i)) {
+ dynamic_config_l2++;
+ }
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali PMU: mask 0x%x, pp_core %d, l2_core %d \n", mask, dynamic_config_pp, dynamic_config_l2));
+
+ return mask;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_pmu.h b/drivers/gpu/mali/mali/common/mali_pmu.h
new file mode 100644
index 00000000..55715f00
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pmu.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Mali driver functions for the Mali-400 in-built PMU
+ */
+
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+
+#define MALI_GP_DOMAIN_INDEX 0
+#define MALI_PP0_DOMAIN_INDEX 1
+#define MALI_PP1_DOMAIN_INDEX 2
+#define MALI_PP2_DOMAIN_INDEX 3
+#define MALI_PP3_DOMAIN_INDEX 4
+#define MALI_PP4_DOMAIN_INDEX 5
+#define MALI_PP5_DOMAIN_INDEX 6
+#define MALI_PP6_DOMAIN_INDEX 7
+#define MALI_PP7_DOMAIN_INDEX 8
+#define MALI_L20_DOMAIN_INDEX 9
+#define MALI_L21_DOMAIN_INDEX 10
+#define MALI_L22_DOMAIN_INDEX 11
+
+#define MALI_MAX_NUMBER_OF_DOMAINS 12
+
+/* Record the domain config from the customer or default config */
+extern u16 mali_pmu_global_domain_config[];
+
+static inline u16 mali_pmu_get_domain_mask(u32 index)
+{
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+
+ return mali_pmu_global_domain_config[index];
+}
+
+static inline void mali_pmu_set_domain_mask(u32 index, u16 value)
+{
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+
+ mali_pmu_global_domain_config[index] = value;
+}
+
+static inline void mali_pmu_copy_domain_mask(void *src, u32 len)
+{
+ _mali_osk_memcpy(mali_pmu_global_domain_config, src, len);
+}
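+
+/* Example (sketch): a platform integration installing its domain
+ * configuration at init time; 'pmu_domain_config' stands in for the u16
+ * array that platform code supplies (e.g. via mali_gpu_device_data).
+ *
+ *   u16 pmu_domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+ *       0x1, 0x2, 0x4, // GP, PP0 and PP1 domains; remaining entries stay 0
+ *   };
+ *   mali_pmu_copy_domain_mask(pmu_domain_config, sizeof(pmu_domain_config));
+ */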
+
+struct mali_pmu_core;
+
+/** @brief Initialisation of the Mali PMU
+ *
+ * This is called from the entry point of the driver in order to create and
+ * initialize the PMU resource.
+ *
+ * @param resource Pointer to the PMU resource
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource);
+
+/** @brief Deallocate the PMU resource
+ *
+ * This is called on driver exit to terminate the PMU resource.
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_up.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
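+
+/* Example (sketch): power down the PP1 domain using its configured mask.
+ *
+ *   struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ *   if (NULL != pmu) {
+ *       mali_pmu_power_down(pmu, mali_pmu_get_domain_mask(MALI_PP1_DOMAIN_INDEX));
+ *   }
+ */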
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_down.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down all cores.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up all cores.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
+
+#endif /* __MALI_PMU_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_pp.c b/drivers/gpu/mali/mali/common/mali_pp.c
new file mode 100644
index 00000000..cd172201
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pp.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_dma.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
+{
+ struct mali_pp_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+ if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) {
+ MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+ return NULL;
+ }
+
+ core = _mali_osk_malloc(sizeof(struct mali_pp_core));
+ if (NULL != core) {
+ core->core_id = mali_global_num_pp_cores;
+ core->bcast_id = bcast_id;
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) {
+ _mali_osk_errcode_t ret;
+
+ if (!is_virtual) {
+ ret = mali_pp_reset(core);
+ } else {
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ ret = mali_group_add_pp_core(group, core);
+ if (_MALI_OSK_ERR_OK == ret) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_pp,
+ group,
+ mali_pp_irq_probe_trigger,
+ mali_pp_irq_probe_ack,
+ core,
+ resource->description);
+ if (NULL != core->irq) {
+ mali_global_pp_cores[mali_global_num_pp_cores] = core;
+ mali_global_num_pp_cores++;
+
+ return core;
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+ }
+ mali_group_remove_pp_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_pp_delete(struct mali_pp_core *core)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+
+ /* Remove core from global list */
+ for (i = 0; i < mali_global_num_pp_cores; i++) {
+ if (mali_global_pp_cores[i] == core) {
+ mali_global_pp_cores[i] = NULL;
+ mali_global_num_pp_cores--;
+
+ if (i != mali_global_num_pp_cores) {
+ /* We removed a PP core from the middle of the array -- move the last
+ * PP core to the current position to close the gap */
+ mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores];
+ mali_global_pp_cores[mali_global_num_pp_cores] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ _mali_osk_free(core);
+}
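
The removal loop above uses the swap-with-last idiom to keep the global core array dense without shifting elements. A standalone sketch of the same idea, with illustrative names ('items', 'count'):

/* O(1) removal from an unordered array: move the last element into the
 * vacated slot instead of shifting the tail down. */
static void array_swap_remove(void **items, u32 *count, u32 index)
{
	(*count)--;
	if (index != *count) {
		items[index] = items[*count]; /* fill the gap with the last element */
	}
	items[*count] = NULL;
}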
+
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ /* Will only send the stop bus command, and not wait for it to complete */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Send the stop bus command. */
+ mali_pp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+ break;
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
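
Several functions in this file share the same bounded-poll shape: spin a fixed number of iterations on a status register and report a fault if the expected bit never appears, so a hung core cannot stall the kernel indefinitely. A sketch of that pattern, factored out with an illustrative name:

/* Bounded poll: returns _MALI_OSK_ERR_FAULT if 'bit' never asserts
 * within the poll budget. */
static _mali_osk_errcode_t poll_for_bit(struct mali_hw_core *hw, u32 reg, u32 bit)
{
	int i;

	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
		if (mali_hw_core_register_read(hw, reg) & bit) {
			return _MALI_OSK_ERR_OK;
		}
	}

	return _MALI_OSK_ERR_FAULT; /* bit never asserted within the poll budget */
}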
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = {
+ 0x0, /* Renderer List Address Register */
+ 0x0, /* Renderer State Word Base Address Register */
+ 0x0, /* Renderer Vertex Base Register */
+ 0x2, /* Feature Enable Register */
+ 0x0, /* Z Clear Value Register */
+ 0x0, /* Stencil Clear Value Register */
+ 0x0, /* ABGR Clear Value 0 Register */
+ 0x0, /* ABGR Clear Value 1 Register */
+ 0x0, /* ABGR Clear Value 2 Register */
+ 0x0, /* ABGR Clear Value 3 Register */
+ 0x0, /* Bounding Box Left Right Register */
+ 0x0, /* Bounding Box Bottom Register */
+ 0x0, /* FS Stack Address Register */
+ 0x0, /* FS Stack Size and Initial Value Register */
+ 0x0, /* Reserved */
+ 0x0, /* Reserved */
+ 0x0, /* Origin Offset X Register */
+ 0x0, /* Origin Offset Y Register */
+ 0x75, /* Subpixel Specifier Register */
+ 0x0, /* Tiebreak mode Register */
+ 0x0, /* Polygon List Format Register */
+ 0x0, /* Scaling Register */
+ 0x0 /* Tilebuffer configuration Register */
+};
+
+/* WBx register reset values */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = {
+ 0x0, /* WBx Source Select Register */
+ 0x0, /* WBx Target Address Register */
+ 0x0, /* WBx Target Pixel Format Register */
+ 0x0, /* WBx Target AA Format Register */
+ 0x0, /* WBx Target Layout */
+ 0x0, /* WBx Target Scanline Length */
+ 0x0, /* WBx Target Flags Register */
+ 0x0, /* WBx MRT Enable Register */
+ 0x0, /* WBx MRT Offset Register */
+ 0x0, /* WBx Global Test Enable Register */
+ 0x0, /* WBx Global Test Reference Value Register */
+ 0x0 /* WBx Global Test Compare Function Register */
+};
+
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+ /* Bus must be stopped before calling this function */
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+
+ /* Set register to a bogus value. The register will be used to detect when reset is complete */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+ /* Force core to reset */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ /* Wait for reset to be complete */
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW)) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+ int i;
+ u32 rawstat = 0;
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ if (!(mali_pp_read_status(core) & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+ rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+ if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
+ break;
+ }
+ }
+ }
+
+ if (i == MALI_REG_POLL_COUNT_FAST) {
+ MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+ core->hw_core.description, rawstat));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+ mali_pp_reset_async(core);
+ return mali_pp_reset_wait(core);
+}
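
Splitting reset into an async kick-off and a separate wait lets a caller overlap the reset latency of several cores instead of serializing reset-plus-wait per core. A hedged sketch of that usage (the function and parameter names are illustrative, not from this driver):

/* Issue all soft resets first, then wait for each to finish. */
static _mali_osk_errcode_t reset_all_cores(struct mali_pp_core **cores, u32 n)
{
	u32 i;
	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;

	for (i = 0; i < n; i++) {
		mali_pp_reset_async(cores[i]); /* kick off all resets in parallel */
	}

	for (i = 0; i < n; i++) {
		if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(cores[i])) {
			err = _MALI_OSK_ERR_FAULT;
		}
	}

	return err;
}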
+
+void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
+ mali_dma_cmd_buf *buf)
+{
+ u32 relative_address;
+ u32 start_index;
+ u32 nr_of_regs;
+ u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+ u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+ u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+ u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+ u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+ u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+ relative_address = MALI200_REG_ADDR_RSW;
+ start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+ nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+ mali_dma_write_array_conditional(buf, &core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* MALI200_REG_ADDR_STACK_SIZE */
+ relative_address = MALI200_REG_ADDR_STACK_SIZE;
+ start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+ mali_dma_write_conditional(buf, &core->hw_core,
+ relative_address, frame_registers[start_index],
+ mali_frame_registers_reset_values[start_index]);
+
+ /* Skip 2 reserved registers */
+
+ /* Write remaining registers */
+ relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+ start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+ nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+ mali_dma_write_array_conditional(buf, &core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* Write WBx registers */
+ if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+ mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+ mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+ mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+
+ /* This is the command that starts the core.
+ *
+	 * Don't actually run the job if PROFILING_SKIP_PP_JOBS is defined; just
+	 * force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_PP_JOBS)
+ mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+ mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
+}
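
The "conditional" writes above only emit a command when the requested value differs from the register's known post-reset value, so untouched registers cost no DMA command-buffer space. A conceptual sketch of what such a helper amounts to; the real implementation is mali_dma_write_conditional() in mali_dma.c, and this illustrative version only mirrors its intent:

static void write_if_not_default(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
				 u32 reg, u32 value, u32 reset_value)
{
	/* A freshly reset core already holds reset_value, so the write can be skipped */
	if (value != reset_value) {
		mali_dma_write(buf, core, reg, value);
	}
}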
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
+{
+ u32 relative_address;
+ u32 start_index;
+ u32 nr_of_regs;
+ u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+ u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+ u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+ u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+ u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+ u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+ relative_address = MALI200_REG_ADDR_RSW;
+ start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+ nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* MALI200_REG_ADDR_STACK_SIZE */
+ relative_address = MALI200_REG_ADDR_STACK_SIZE;
+ start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+ relative_address, frame_registers[start_index],
+ mali_frame_registers_reset_values[start_index]);
+
+ /* Skip 2 reserved registers */
+
+ /* Write remaining registers */
+ relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+ start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+ nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* Write WBx registers */
+ if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+
+#ifdef CONFIG_MALI400_HEATMAPS_ENABLED
+ if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
+ }
+#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+	/* Add a barrier to make sure all register writes are finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core.
+ *
+	 * Don't actually run the job if PROFILING_SKIP_PP_JOBS is defined; just
+	 * force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_PP_JOBS)
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
+
+	/* Add a barrier to make sure the previous register writes are finished */
+ _mali_osk_write_mem_barrier();
+}
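
The start sequence above relies on a common MMIO pattern: batch the register setup with relaxed (unordered) writes, then fence once before the single write that starts the hardware, and once after so the start command is posted before the CPU moves on. Reduced to a sketch with illustrative names:

static void start_core_example(struct mali_hw_core *hw, u32 start_reg, u32 start_cmd)
{
	/* ... many mali_hw_core_register_write_relaxed() setup calls ... */
	_mali_osk_write_mem_barrier(); /* order all setup writes before the start */
	mali_hw_core_register_write_relaxed(hw, start_reg, start_cmd);
	_mali_osk_write_mem_barrier(); /* ensure the start write is posted */
}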
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index)
+{
+ if (mali_global_num_pp_cores > index) {
+ return mali_global_pp_cores[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+ return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+ if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout) {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+}
+#endif
+
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob)
+{
+ u32 val0 = 0;
+ u32 val1 = 0;
+ u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob);
+ u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob);
+#if defined(CONFIG_MALI400_PROFILING)
+ int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
+#endif
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(counter_index, val0);
+#endif
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+#endif
+ }
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+ return n;
+}
+#endif
diff --git a/drivers/gpu/mali/mali/common/mali_pp.h b/drivers/gpu/mali/mali/common/mali_pp.h
new file mode 100644
index 00000000..42a12532
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pp.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+#include "mali_dma.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES 9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ u32 core_id; /**< Unique core ID */
+ u32 bcast_id; /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job);
+
+/**
+ * @brief Add commands to DMA command buffer to start PP job on core.
+ */
+void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
+ mali_dma_cmd_buf *buf);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->core_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->bcast_id;
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+/**
+ * Put instrumented HW counters from the core(s) to the job object (if enabled)
+ *
+ * parent and child are always the same, except for virtual jobs on Mali-450.
+ * In this case, the counters will be enabled on the virtual core (parent),
+ * but the values need to be read from the child cores.
+ *
+ * @param parent The core used to see if the counters were enabled
+ * @param child The core to actually read the values from
+ * @param job Job object to update with counter values (if enabled)
+ * @param subjob Which sub job the counters are applicable for (core ID for virtual jobs)
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
+
+MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+{
+ return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_read_status(struct mali_pp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+}
+
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+}
+
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core,
+ struct mali_pp_job *job, u32 subjob)
+{
+ u32 addr = mali_pp_job_get_addr_frame(job, subjob);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr);
+}
+
+
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job, u32 subjob)
+{
+ u32 addr = mali_pp_job_get_addr_stack(job, subjob);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_pp_job.c b/drivers/gpu/mali/mali/common/mali_pp_job.c
new file mode 100644
index 00000000..672766c5
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pp_job.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_dma.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_pp_scheduler.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+
+void mali_pp_job_initialize(void)
+{
+ _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0);
+}
+
+void mali_pp_job_terminate(void)
+{
+ _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
+}
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
+ _mali_uk_pp_start_job_s __user *uargs, u32 id)
+{
+ struct mali_pp_job *job;
+ u32 perf_counter_flag;
+
+ job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
+ if (NULL != job) {
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
+ goto fail;
+ }
+
+ if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
+ MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+ goto fail;
+ }
+
+ if (!mali_pp_job_use_no_notification(job)) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+ if (NULL == job->finished_notification) goto fail;
+ }
+
+ perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+		/* No counters came from user space, so pass the
+		 * debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);
+
+			/* These counters apply to all virtual jobs, and to sub jobs where no per sub job counter is specified */
+ job->uargs.perf_counter_src0 = pp_counter_src0;
+ job->uargs.perf_counter_src1 = pp_counter_src1;
+
+ /* We only copy the per sub job array if it is enabled with at least one counter */
+ if (0 < sub_job_count) {
+ job->perf_counter_per_sub_job_count = sub_job_count;
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
+ }
+ }
+
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ _mali_osk_list_init(&job->session_list);
+ job->id = id;
+
+ job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+ job->num_memory_cookies = job->uargs.num_memory_cookies;
+ if (job->num_memory_cookies > 0) {
+ u32 size;
+ u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
+
+ if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
+ MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
+ goto fail;
+ }
+
+ size = sizeof(*memory_cookies) * job->num_memory_cookies;
+
+ job->memory_cookies = _mali_osk_malloc(size);
+ if (NULL == job->memory_cookies) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
+ goto fail;
+ }
+
+ if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
+ goto fail;
+ }
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ job->num_dma_bufs = job->num_memory_cookies;
+ job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
+ if (NULL == job->dma_bufs) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
+ goto fail;
+ }
+#endif
+ }
+
+ /* Prepare DMA command buffer to start job, if it is virtual. */
+ if (mali_pp_job_is_virtual_group_job(job)) {
+ struct mali_pp_core *core;
+ _mali_osk_errcode_t err = mali_dma_get_cmd_buf(&job->dma_cmd_buf);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
+ goto fail;
+ }
+
+ core = mali_pp_scheduler_get_virtual_pp();
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
+ /* Not a valid job. */
+ goto fail;
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ return job;
+ }
+
+fail:
+ if (NULL != job) {
+ mali_pp_job_delete(job);
+ }
+
+ return NULL;
+}
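
mali_pp_job_create() follows a strict validate-then-copy discipline for user-supplied arrays: bound the element count first, then allocate exactly that much and copy. A hedged sketch of the same pattern in isolation; MAX_ITEMS and the function name are illustrative, not driver constants:

#define MAX_ITEMS 1024 /* illustrative bound, not a driver constant */

static u32 *copy_user_array(u32 __user *src, u32 count)
{
	u32 *dst;

	if (0 == count || count > MAX_ITEMS) {
		return NULL; /* reject absurd counts before allocating */
	}

	dst = _mali_osk_malloc(count * sizeof(u32));
	if (NULL == dst) {
		return NULL;
	}

	if (0 != _mali_osk_copy_from_user(dst, src, count * sizeof(u32))) {
		_mali_osk_free(dst); /* don't leak on a failed copy */
		return NULL;
	}

	return dst;
}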
+
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+ mali_dma_put_cmd_buf(&job->dma_cmd_buf);
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ }
+
+ _mali_osk_free(job->memory_cookies);
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* Unmap buffers attached to job */
+ if (0 < job->num_dma_bufs) {
+ mali_dma_buf_unmap_job(job);
+ }
+
+ _mali_osk_free(job->dma_bufs);
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+ _mali_osk_free(job);
+}
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
+{
+	/* Virtual jobs always use the global job counter, as do jobs without any per sub job counters */
+ if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+ return job->uargs.perf_counter_src0;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) {
+ return job->perf_counter_per_sub_job_src0[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src0;
+}
+
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
+{
+	/* Virtual jobs always use the global job counter, as do jobs without any per sub job counters */
+	if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+		return job->uargs.perf_counter_src1;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) {
+ return job->perf_counter_per_sub_job_src1[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src1;
+}
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter)
+{
+ pp_counter_src0 = counter;
+}
+
+void mali_pp_job_set_pp_counter_global_src1(u32 counter)
+{
+ pp_counter_src1 = counter;
+}
+
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+	/* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will both inc and dec, for a net change of zero */
+
+ pp_counter_per_sub_job_src0[sub_job] = counter;
+}
+
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+	/* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will both inc and dec, for a net change of zero */
+
+ pp_counter_per_sub_job_src1[sub_job] = counter;
+}
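
The inc/dec bookkeeping in the two setters above maintains a single atomic count of enabled per-sub-job counters, so mali_pp_job_create() can ask "are there any per sub job counters at all?" without scanning the arrays. Reduced to its net effect in a sketch (the setters above achieve the same result with an unconditional inc on the old value plus an unconditional dec on the new one):

static void update_enable_count(_mali_osk_atomic_t *count, u32 old, u32 new_val)
{
	if (MALI_HW_CORE_NO_COUNTER == old && MALI_HW_CORE_NO_COUNTER != new_val) {
		_mali_osk_atomic_inc(count); /* disabled -> enabled */
	} else if (MALI_HW_CORE_NO_COUNTER != old && MALI_HW_CORE_NO_COUNTER == new_val) {
		_mali_osk_atomic_dec(count); /* enabled -> disabled */
	}
	/* enabled -> enabled and disabled -> disabled leave the count unchanged */
}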
+
+u32 mali_pp_job_get_pp_counter_global_src0(void)
+{
+ return pp_counter_src0;
+}
+
+u32 mali_pp_job_get_pp_counter_global_src1(void)
+{
+ return pp_counter_src1;
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src0[sub_job];
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src1[sub_job];
+}
diff --git a/drivers/gpu/mali/mali/common/mali_pp_job.h b/drivers/gpu/mali/mali/common/mali_pp_job.h
new file mode 100644
index 00000000..a5a9a59a
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pp_job.h
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#include "mali_dma.h"
+#include "mali_dlbu.h"
+#include "mali_timeline.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+
+/**
+ * The structure represents a PP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_pp_job {
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ _mali_osk_list_t session_list; /**< Used to link jobs together in the session job list */
+ _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+ _mali_uk_pp_start_job_s uargs; /**< Arguments from user space */
+ mali_dma_cmd_buf dma_cmd_buf; /**< Command buffer for starting job using Mali-450 DMA unit */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+ u32 sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ u32 sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ u32 num_memory_cookies; /**< Number of memory cookies attached to job */
+ u32 *memory_cookies; /**< Memory cookies attached to job */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ struct mali_dma_buf_attachment **dma_bufs; /**< Array of DMA-bufs used by job */
+ u32 num_dma_bufs; /**< Number of DMA-bufs used by job */
+#endif
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+ u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+ u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+};
+
+void mali_pp_job_initialize(void);
+void mali_pp_job_terminate(void);
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s __user *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job);
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job);
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter);
+void mali_pp_job_set_pp_counter_global_src1(u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter);
+
+u32 mali_pp_job_get_pp_counter_global_src0(void);
+u32 mali_pp_job_get_pp_counter_global_src1(void);
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job);
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job);
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
+{
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+ return job->uargs.dlbu_registers;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual_group_job(struct mali_pp_job *job)
+{
+ if (mali_is_mali450()) {
+ return 1 != job->uargs.num_cores;
+ }
+
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_with_dlbu(struct mali_pp_job *job)
+{
+#if defined(CONFIG_MALI450)
+ return 0 == job->uargs.num_cores;
+#else
+ return MALI_FALSE;
+#endif
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+ if (mali_pp_job_is_with_dlbu(job)) {
+ return MALI_DLBU_VIRT_ADDR;
+ } else if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_frame[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+ if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_stack[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb0_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb1_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb2_registers;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+ job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
+ ) {
+ /* At least one output unit active */
+ return MALI_FALSE;
+ }
+
+ /* All outputs are disabled - we can abort the job */
+ return MALI_TRUE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_fb_lookup_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ return MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+ return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Used when terminating a session that still has jobs: marks all unstarted
+   sub jobs as started, completed and failed, so no new sub jobs will be started. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+ u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+ job->sub_jobs_completed += jobs_remaining;
+ job->sub_job_errors += jobs_remaining;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_success(struct mali_pp_job *job)
+{
+ u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+ job->sub_jobs_completed += jobs_remaining;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+ return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+ return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+ return job->sub_jobs_num;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
+{
+	MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (0 != job->num_memory_cookies) {
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Assert that we are marking the "first unstarted sub job" as started */
+ MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+
+ job->sub_jobs_started++;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+ job->sub_jobs_completed++;
+ if (MALI_FALSE == success) {
+ job->sub_job_errors++;
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+ if (0 == job->sub_job_errors) {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+{
+ return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+ return job->uargs.perf_counter_flag;
+}
+
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value1[sub_job] = value;
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+ if (mali_pp_job_is_with_dlbu(job) && job->sub_jobs_num != 1) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Returns MALI_TRUE if first job should be started after second job.
+ *
+ * @param first First job.
+ * @param second Second job.
+ * @return MALI_TRUE if first job should be started after second job, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_should_start_after(struct mali_pp_job *first, struct mali_pp_job *second)
+{
+ MALI_DEBUG_ASSERT_POINTER(first);
+ MALI_DEBUG_ASSERT_POINTER(second);
+
+ /* First job should be started after second job if second job is in progress. */
+ if (0 < second->sub_jobs_started) {
+ return MALI_TRUE;
+ }
+
+ /* First job should be started after second job if first job has a higher job id. A span is
+ used to handle job id wrapping. */
+ if ((mali_pp_job_get_id(first) - mali_pp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN) {
+ return MALI_TRUE;
+ }
+
+ /* Second job should be started after first job. */
+ return MALI_FALSE;
+}
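
The id comparison above is the classic wrap-safe sequence test: with unsigned arithmetic, (a - b) is small when a was issued after b, even if the id counter has wrapped past 0xFFFFFFFF in between, provided the two ids are less than the span apart. As a standalone sketch:

/* Wrap-safe "a is newer than b" test for monotonically assigned u32 ids. */
static mali_bool id_is_after(u32 a, u32 b, u32 span)
{
	/* Unsigned subtraction wraps, so this holds across counter overflow */
	return ((a - b) < span) ? MALI_TRUE : MALI_FALSE;
}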
+
+/**
+ * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
+ *
+ * @param job Job to check.
+ * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
+
+ return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
+}
+
+/**
+ * Get PP job's Timeline tracker.
+ *
+ * @param job PP job.
+ * @return Pointer to Timeline tracker for the job.
+ */
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_pp_scheduler.c b/drivers/gpu/mali/mali/common/mali_pp_scheduler.c
new file mode 100644
index 00000000..d77f4251
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pp_scheduler.c
@@ -0,0 +1,2134 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_session.h"
+#include "mali_pm_domain.h"
+#include "linux/mali/mali_utgard.h"
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+/* Queue type used for physical and virtual job queues. */
+struct mali_pp_scheduler_job_queue {
+ _MALI_OSK_LIST_HEAD(normal_pri); /* List of jobs with some unscheduled work. */
+ _MALI_OSK_LIST_HEAD(high_pri); /* List of high priority jobs with some unscheduled work. */
+ u32 depth; /* Depth of combined queues. */
+};
+
+/* If dma_buf with map on demand is used, we defer job deletion and job queuing
+ * when in atomic context, since both might sleep. */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+static void mali_pp_scheduler_job_queued(void);
+static void mali_pp_scheduler_job_completed(mali_bool job_started);
+
+/* Maximum of 9 PP groups: up to 8 physical PP cores plus one virtual group (a group can have at most one PP core) */
+#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
+
+static mali_bool mali_pp_scheduler_is_suspended(void *data);
+
+static u32 pp_version = 0;
+
+/* Physical job queue */
+static struct mali_pp_scheduler_job_queue job_queue;
+
+/* Physical groups */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working); /* List of physical groups with working jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle); /* List of physical groups with idle jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled); /* List of disabled physical groups */
+
+/* Virtual job queue (Mali-450 only) */
+static struct mali_pp_scheduler_job_queue virtual_group_job_queue;
+
+/**
+ * Add job to scheduler queue.
+ *
+ * @param job Job to queue.
+ * @return Schedule mask.
+ */
+static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job);
+
+/* Virtual group (Mali-450 only) */
+static struct mali_group *virtual_group = NULL; /* Virtual group (if any) */
+static enum {
+ VIRTUAL_GROUP_IDLE,
+ VIRTUAL_GROUP_WORKING,
+ VIRTUAL_GROUP_DISABLED,
+}
+virtual_group_state = VIRTUAL_GROUP_IDLE; /* Current state of the virtual group */
+
+/* Number of physical cores */
+static u32 num_cores = 0;
+
+/* Number of physical cores which are enabled */
+static u32 enabled_cores = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+static _mali_osk_spinlock_irq_t *pp_scheduler_lock = NULL;
+#else
+static _mali_osk_spinlock_t *pp_scheduler_lock = NULL;
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+
+MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_lock(pp_scheduler_lock);
+#else
+ _mali_osk_spinlock_lock(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken.\n"));
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock.\n"));
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_lock);
+#else
+ _mali_osk_spinlock_unlock(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+#if defined(DEBUG)
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock)
+#else
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() do {} while (0)
+#endif /* defined(DEBUG) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
+static _mali_osk_spinlock_irq_t *pp_scheduler_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
+
+static void mali_pp_scheduler_deferred_job_delete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
+
+ /* This job object should not be on any lists. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+ _mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
+
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
+
+ _mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
+}
+
+static void mali_pp_scheduler_do_job_delete(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_IGNORE(arg);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
+
+ /*
+ * Quickly "unhook" the jobs pending to be deleted, so we can release the lock before
+	 * we start deleting the job objects (without any locks held)
+ */
+ _mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
+
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
+ mali_pp_job_delete(job); /* delete the job object itself */
+ }
+}
+
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
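
Both deferred workers (deletion above, queueing below) use the same splice-under-lock idiom: detach the entire pending list into a local head while holding the spinlock, then walk the private copy with no locks held, so the per-entry work, which may sleep, never runs under a spinlock. A sketch of the idiom with illustrative names:

static void drain_pending(_mali_osk_spinlock_irq_t *lock, _mali_osk_list_t *pending)
{
	_MALI_OSK_LIST_HEAD_STATIC_INIT(local);
	struct mali_pp_job *job;
	struct mali_pp_job *tmp;

	_mali_osk_spinlock_irq_lock(lock);
	_mali_osk_list_move_list(pending, &local); /* O(1) splice of the whole list */
	_mali_osk_spinlock_irq_unlock(lock);

	_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &local, struct mali_pp_job, list) {
		mali_pp_job_delete(job); /* may sleep; safe here, no lock held */
	}
}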
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *pp_scheduler_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_queue_list);
+
+static void mali_pp_scheduler_deferred_job_queue(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
+ _mali_osk_list_addtail(&job->list, &pp_scheduler_job_queue_list);
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
+
+ _mali_osk_wq_schedule_work(pp_scheduler_wq_job_queue);
+}
+
+static void mali_pp_scheduler_do_job_queue(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_IGNORE(arg);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
+
+ /*
+ * Quickly "unhook" the jobs pending to be queued, so we can release the lock before
+ * we start queueing the job objects (without any locks held)
+ */
+ _mali_osk_list_move_list(&pp_scheduler_job_queue_list, &list);
+
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
+ _mali_osk_list_delinit(&job->list);
+ schedule_mask |= mali_pp_scheduler_queue_job(job);
+ }
+
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
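+
+/* Both deferred paths above follow the same two-step pattern: the producer
+ * holds the IRQ-safe spinlock only long enough to append a job to a list and
+ * kick a work queue; the worker then splices the whole list off under the
+ * lock and does the heavy lifting (deletion or queueing) with no locks held.
+ * A minimal sketch of the pattern, with hypothetical names:
+ *
+ *   producer:  lock(l); list_add_tail(&job->list, &pending); unlock(l);
+ *              schedule_work(w);
+ *   worker:    lock(l); move_list(&pending, &local); unlock(l);
+ *              for_each(job, &local) process(job);   // lock-free processing
+ */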
+
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_has_virtual_group(void)
+{
+#if defined(CONFIG_MALI450)
+ return NULL != virtual_group;
+#else
+ return MALI_FALSE;
+#endif /* defined(CONFIG_MALI450) */
+}
+
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue.high_pri);
+ job_queue.depth = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&virtual_group_job_queue.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&virtual_group_job_queue.high_pri);
+ virtual_group_job_queue.depth = 0;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ pp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#else
+ pp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ if (NULL == pp_scheduler_lock) goto cleanup;
+
+ pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == pp_scheduler_working_wait_queue) goto cleanup;
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
+ if (NULL == pp_scheduler_wq_job_delete) goto cleanup;
+
+ pp_scheduler_job_delete_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == pp_scheduler_job_delete_lock) goto cleanup;
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+ pp_scheduler_wq_job_queue = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_queue, NULL);
+ if (NULL == pp_scheduler_wq_job_queue) goto cleanup;
+
+ pp_scheduler_job_queue_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == pp_scheduler_job_queue_lock) goto cleanup;
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+ return _MALI_OSK_ERR_OK;
+
+cleanup:
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+ if (NULL != pp_scheduler_job_queue_lock) {
+ _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
+ pp_scheduler_job_queue_lock = NULL;
+ }
+
+ if (NULL != pp_scheduler_wq_job_queue) {
+ _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
+ pp_scheduler_wq_job_queue = NULL;
+ }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ if (NULL != pp_scheduler_job_delete_lock) {
+ _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
+ pp_scheduler_job_delete_lock = NULL;
+ }
+
+ if (NULL != pp_scheduler_wq_job_delete) {
+ _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+ pp_scheduler_wq_job_delete = NULL;
+ }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+ if (NULL != pp_scheduler_working_wait_queue) {
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+ pp_scheduler_working_wait_queue = NULL;
+ }
+
+ if (NULL != pp_scheduler_lock) {
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_term(pp_scheduler_lock);
+#else
+ _mali_osk_spinlock_term(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ pp_scheduler_lock = NULL;
+ }
+
+ return _MALI_OSK_ERR_NOMEM;
+}
+
+void mali_pp_scheduler_terminate(void)
+{
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+ _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
+ _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
+ _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_term(pp_scheduler_lock);
+#else
+ _mali_osk_spinlock_term(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+void mali_pp_scheduler_populate(void)
+{
+ struct mali_group *group;
+ struct mali_pp_core *pp_core;
+ u32 num_groups;
+ u32 i;
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ /* Do we have a virtual group? */
+ for (i = 0; i < num_groups; i++) {
+ group = mali_group_get_glob_group(i);
+
+ if (mali_group_is_virtual(group)) {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Found virtual group %p.\n", group));
+
+ virtual_group = group;
+ break;
+ }
+ }
+
+ /* Find all the available PP cores */
+ for (i = 0; i < num_groups; i++) {
+ group = mali_group_get_glob_group(i);
+ pp_core = mali_group_get_pp_core(group);
+
+ if (NULL != pp_core && !mali_group_is_virtual(group)) {
+ if (0 == pp_version) {
+ /* Retrieve PP version from the first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ /* Add all physical PP cores to the virtual group */
+ mali_group_lock(virtual_group);
+ group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+ mali_group_add_group(virtual_group, group, MALI_TRUE);
+ mali_group_unlock(virtual_group);
+ } else {
+ _mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
+ }
+
+ num_cores++;
+ }
+ }
+
+ enabled_cores = num_cores;
+}
+
+void mali_pp_scheduler_depopulate(void)
+{
+ struct mali_group *group, *temp;
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+
+ /* Delete all groups owned by scheduler */
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_delete(virtual_group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
+ mali_group_delete(group);
+ }
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
+ mali_group_delete(group);
+ }
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_disable_empty_virtual(void)
+{
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+ if (mali_group_virtual_disable_if_empty(virtual_group)) {
+ MALI_DEBUG_PRINT(4, ("Disabling empty virtual group\n"));
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+ virtual_group_state = VIRTUAL_GROUP_DISABLED;
+ }
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_enable_empty_virtual(void)
+{
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+ if (mali_group_virtual_enable_if_empty(virtual_group)) {
+ MALI_DEBUG_PRINT(4, ("Re-enabling empty virtual group\n"));
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_DISABLED == virtual_group_state);
+
+ virtual_group_state = VIRTUAL_GROUP_IDLE;
+ }
+}
+
+static struct mali_pp_job *mali_pp_scheduler_get_job(struct mali_pp_scheduler_job_queue *queue)
+{
+ struct mali_pp_job *job = NULL;
+
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* Check if we have a normal priority job. */
+ if (!_mali_osk_list_empty(&queue->normal_pri)) {
+ MALI_DEBUG_ASSERT(queue->depth > 0);
+ job = _MALI_OSK_LIST_ENTRY(queue->normal_pri.next, struct mali_pp_job, list);
+ }
+
+ /* Prefer normal priority job if it is in progress. */
+ if (NULL != job && 0 < job->sub_jobs_started) {
+ return job;
+ }
+
+ /* Check if we have a high priority job. */
+ if (!_mali_osk_list_empty(&queue->high_pri)) {
+ MALI_DEBUG_ASSERT(queue->depth > 0);
+ job = _MALI_OSK_LIST_ENTRY(queue->high_pri.next, struct mali_pp_job, list);
+ }
+
+ return job;
+}
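+
+/* Example of the selection policy above (illustrative): with a normal
+ * priority queue holding J1 (2 of 4 sub jobs already started) and a high
+ * priority queue holding J2, J1 is returned, since finishing a partially
+ * started job frees its resources sooner than preempting it with J2 would.
+ * If J1 had no started sub jobs, J2 would be returned instead.
+ */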
+
+/**
+ * Returns a physical job if a physical job is ready to run
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ return mali_pp_scheduler_get_job(&job_queue);
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(job_queue.depth > 0);
+
+ /* Remove job from queue */
+ if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
+ /* All sub jobs have been started: remove job from queue */
+ _mali_osk_list_delinit(&job->list);
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+ }
+
+ --job_queue.depth;
+}
+
+/**
+ * Returns a virtual job if a virtual job is ready to run
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_group_job(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT_POINTER(virtual_group);
+ return mali_pp_scheduler_get_job(&virtual_group_job_queue);
+}
+
+static void mali_pp_scheduler_dequeue_virtual_group_job(struct mali_pp_job *job)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(virtual_group_job_queue.depth > 0);
+
+ /* Remove job from queue */
+ if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
+ _mali_osk_list_delinit(&job->list);
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+ --virtual_group_job_queue.depth;
+ }
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_pick_virtual_group_job(struct mali_pp_job *job,
+ u32 *first_subjob, u32 *last_subjob)
+{
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+
+ MALI_DEBUG_ASSERT_POINTER(first_subjob);
+ MALI_DEBUG_ASSERT_POINTER(last_subjob);
+
+ MALI_DEBUG_ASSERT(virtual_group_job_queue.depth > 0);
+
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+
+ *first_subjob = *last_subjob =
+ mali_pp_job_get_first_unstarted_sub_job(job);
+
+ if (mali_pp_job_is_with_dlbu(job)) {
+ MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+ mali_pp_job_mark_sub_job_started(job, 0);
+ } else {
+ struct mali_group *child, *temp;
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &virtual_group->group_list, struct mali_group, group_list) {
+ if (mali_pp_job_has_unstarted_sub_jobs(job)) {
+ *last_subjob = mali_pp_job_get_first_unstarted_sub_job(job);
+ mali_pp_job_mark_sub_job_started(job, *last_subjob);
+ } else {
+ break;
+ }
+ }
+ }
+
+ /* Virtual group is now working. */
+ virtual_group_state = VIRTUAL_GROUP_WORKING;
+
+ mali_pp_scheduler_dequeue_virtual_group_job(job);
+}
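+
+/* Example (illustrative): for a non-DLBU job with four unstarted sub jobs and
+ * a virtual group currently holding two physical children, the loop above
+ * marks sub jobs 0 and 1 as started and returns *first_subjob == 0 and
+ * *last_subjob == 1; the remaining sub jobs stay queued for a later pick.
+ * For a DLBU job there is exactly one sub job, so both outputs are 0.
+ */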
+
+
+/**
+ * Checks whether the criteria are met for removing a physical core from the virtual group
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(mali_pp_scheduler_has_virtual_group());
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+	/*
+	 * The criteria for taking a physical group out of the virtual group are:
+	 * - The virtual group is idle
+	 * - There are currently no physical groups (neither idle nor working)
+	 * - There are physical jobs to be scheduled
+	 */
+ return (VIRTUAL_GROUP_IDLE == virtual_group_state) &&
+ _mali_osk_list_empty(&group_list_idle) &&
+ _mali_osk_list_empty(&group_list_working) &&
+ (NULL != mali_pp_scheduler_get_physical_job());
+}
+
+MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ if (!_mali_osk_list_empty(&group_list_idle)) {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list.\n"));
+ return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+ } else if (mali_pp_scheduler_has_virtual_group()) {
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+ if (mali_pp_scheduler_can_move_virtual_to_physical()) {
+ struct mali_group *group;
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group.\n"));
+ group = mali_group_acquire_group(virtual_group);
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_pp_scheduler_disable_empty_virtual();
+ }
+
+ return group;
+ }
+ }
+
+ return NULL;
+}
+
+static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
+{
+ if (MALI_FALSE == mali_pp_job_use_no_notification(job)) {
+ u32 i;
+ u32 num_counters_to_copy;
+ mali_bool success = mali_pp_job_was_success(job);
+
+ _mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ if (mali_pp_job_is_with_dlbu(job)) {
+ num_counters_to_copy = num_cores; /* Number of physical cores available */
+ } else {
+ num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+ }
+
+		jobres->perf_counter_src0 = mali_pp_job_get_pp_counter_global_src0();
+		jobres->perf_counter_src1 = mali_pp_job_get_pp_counter_global_src1();
+		for (i = 0; i < num_counters_to_copy; i++) {
+			jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
+			jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
+		}
+
+ mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
+ job->finished_notification = NULL;
+ }
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ if (MALI_TRUE == deferred) {
+ /* The deletion of the job object (releasing sync refs etc) must be done in a different context */
+ mali_pp_scheduler_deferred_job_delete(job);
+ } else {
+ mali_pp_job_delete(job);
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
+ mali_pp_job_delete(job);
+#endif
+}
+
+static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job, mali_bool job_started)
+{
+ /* This job object should not be on any lists. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+ /* Send notification back to user space */
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
+#else
+ mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
+#endif
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ if (_MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE & job->uargs.flags) {
+ _mali_osk_atomic_inc(&job->session->number_of_window_jobs);
+ }
+#endif
+
+ mali_pp_scheduler_job_completed(job_started);
+}
+
+void mali_pp_scheduler_schedule(void)
+{
+ struct mali_group *physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
+ struct mali_pp_job *physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
+ u32 physical_sub_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
+ int num_physical_jobs_to_start = 0;
+ int i;
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ /* Lock the virtual group since we might have to grab physical groups. */
+ mali_group_lock(virtual_group);
+ }
+
+ mali_pp_scheduler_lock();
+ if (pause_count > 0) {
+ /* Scheduler is suspended, don't schedule any jobs. */
+ mali_pp_scheduler_unlock();
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_unlock(virtual_group);
+ }
+ return;
+ }
+
+ /* Find physical job(s) to schedule first. */
+ while (1) {
+ struct mali_group *group;
+ struct mali_pp_job *job;
+ u32 sub_job;
+
+ job = mali_pp_scheduler_get_physical_job();
+ if (NULL == job) {
+ break; /* No job, early out. */
+ }
+
+ if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) &&
+ mali_pp_job_is_large_and_unstarted(job) && !_mali_osk_list_empty(&group_list_working)) {
+ /* Since not all groups are idle, don't schedule yet. */
+ break;
+ }
+
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+ MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));
+
+ /* Acquire a physical group, either from the idle list or from the virtual group.
+		 * In case the group was acquired from the virtual group, its state will be
+ * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
+ group = mali_pp_scheduler_acquire_physical_group();
+ if (NULL == group) {
+ /* Could not get a group to run the job on, early out. */
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
+ break;
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p.\n", group));
+
+ /* Mark sub job as started. */
+ sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+ mali_pp_job_mark_sub_job_started(job, sub_job);
+
+ /* Remove job from queue (if this was the last sub job). */
+ mali_pp_scheduler_dequeue_physical_job(job);
+
+ /* Move group to working list. */
+ _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);
+
+ /* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding. */
+ physical_groups_to_start[num_physical_jobs_to_start] = group;
+ physical_jobs_to_start[num_physical_jobs_to_start] = job;
+ physical_sub_jobs_to_start[num_physical_jobs_to_start] = sub_job;
+ ++num_physical_jobs_to_start;
+
+ MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
+ }
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
+ /* We have a virtual group and it is idle. */
+
+ struct mali_pp_job *job;
+
+ /* Find a virtual job we can start. */
+ job = mali_pp_scheduler_get_virtual_group_job();
+
+ if (NULL != job) {
+ u32 first_subjob, last_subjob;
+
+				/* Mark the sub jobs this virtual group will run as started,
+				 * and remove the job from the queue if all of its sub jobs
+				 * are started in one go. */
+ mali_pp_scheduler_pick_virtual_group_job(job, &first_subjob, &last_subjob);
+
+ /* We no longer need the scheduler lock, but we still need the virtual lock
+ * in order to start the virtual job.
+ */
+ mali_pp_scheduler_unlock();
+
+ /* Start job. */
+ mali_group_start_job_on_virtual(virtual_group, job, first_subjob, last_subjob);
+ } else {
+ /* No virtual job to start. */
+ mali_pp_scheduler_unlock();
+ }
+ } else {
+ /* We have a virtual group, but it is busy or disabled. */
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE != virtual_group_state);
+
+ mali_pp_scheduler_unlock();
+ }
+ mali_group_unlock(virtual_group);
+ } else {
+ /* There is no virtual group. */
+ mali_pp_scheduler_unlock();
+ }
+
+ /* We have now released the scheduler lock, and we are ready to start the physical jobs.
+ * The reason we want to wait until we have released the scheduler lock is that job start
+ * may take quite a bit of time (many registers have to be written). This will allow new
+ * jobs from user space to come in, and post-processing of other PP jobs to happen at the
+ * same time as we start jobs. */
+ for (i = 0; i < num_physical_jobs_to_start; i++) {
+ struct mali_group *group = physical_groups_to_start[i];
+ struct mali_pp_job *job = physical_jobs_to_start[i];
+ u32 sub_job = physical_sub_jobs_to_start[i];
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
+
+ mali_group_lock(group);
+
+ /* Set state to IDLE if group was acquired from the virtual group. */
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ mali_group_start_job_on_group(group, job, sub_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule).\n",
+ mali_pp_job_get_id(job), job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job)));
+
+ mali_group_unlock(group);
+ }
+}
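+
+/* The function above is a two-phase pattern: phase one picks jobs and groups
+ * under the scheduler lock but only does bookkeeping; phase two programs the
+ * hardware with just the per-group lock held. A condensed sketch with
+ * hypothetical names:
+ *
+ *   lock(scheduler);
+ *   while ((job = pick()) && (group = acquire()))
+ *       to_start[n++] = (group, job);         // cheap bookkeeping only
+ *   unlock(scheduler);
+ *   for (i = 0; i < n; i++) {
+ *       lock(to_start[i].group);              // slow register writes happen
+ *       start(to_start[i]);                   // here, off the shared lock
+ *       unlock(to_start[i].group);
+ *   }
+ */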
+
+/**
+ * Set group idle.
+ *
+ * If @ref group is the virtual group, nothing is done since the virtual group should be idle
+ * already.
+ *
+ * If @ref group is a physical group, it rejoins the virtual group if one exists; otherwise it is
+ * moved to the idle list.
+ *
+ * @note The group and the scheduler must both be locked when entering this function. Both will be
+ * unlocked before exiting.
+ *
+ * @param group The group to set idle.
+ */
+static void mali_pp_scheduler_set_group_idle_and_unlock(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+ if (mali_group_is_virtual(group)) {
+ /* The virtual group should have been set to non-working already. */
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(group);
+
+ return;
+ } else {
+ if (mali_pp_scheduler_has_virtual_group()) {
+ /* Rejoin virtual group. */
+
+ /* We're no longer needed on the scheduler list. */
+ _mali_osk_list_delinit(&(group->pp_scheduler_list));
+
+ /* Make sure no interrupts are handled for this group during the transition
+ * from physical to virtual. */
+ group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(group);
+
+ mali_group_lock(virtual_group);
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_pp_scheduler_enable_empty_virtual();
+ }
+
+ /* We need to recheck the group state since it is possible that someone has
+ * modified the group before we locked the virtual group. */
+ if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
+ mali_group_add_group(virtual_group, group, MALI_TRUE);
+ }
+
+ mali_group_unlock(virtual_group);
+ } else {
+ /* Move physical group back to idle list. */
+ _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
+#endif
+
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(group);
+ }
+ }
+}
+
+/**
+ * Schedule job on locked group.
+ *
+ * @note The group and the scheduler must both be locked when entering this function. Both will be
+ * unlocked before exiting.
+ *
+ * @param group The group to schedule on.
+ */
+static void mali_pp_scheduler_schedule_on_group_and_unlock(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+ if (mali_group_is_virtual(group)) {
+ /* Now that the virtual group is idle, check if we should reconfigure. */
+
+ struct mali_pp_job *virtual_job = NULL;
+ struct mali_pp_job *physical_job = NULL;
+ struct mali_group *physical_group = NULL;
+ u32 physical_sub_job = 0;
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+ if (mali_pp_scheduler_can_move_virtual_to_physical()) {
+ /* There is a runnable physical job and we can acquire a physical group. */
+ physical_job = mali_pp_scheduler_get_physical_job();
+ MALI_DEBUG_ASSERT_POINTER(physical_job);
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));
+
+ /* Mark sub job as started. */
+ physical_sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
+ mali_pp_job_mark_sub_job_started(physical_job, physical_sub_job);
+
+ /* Remove job from queue (if this was the last sub job). */
+ mali_pp_scheduler_dequeue_physical_job(physical_job);
+
+ /* Acquire a physical group from the virtual group. Its state will
+ * be LEAVING_VIRTUAL and must be set to IDLE before it can be
+ * used. */
+ physical_group = mali_group_acquire_group(virtual_group);
+
+ /* Move physical group to the working list, as we will soon start a job on it. */
+ _mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);
+
+ mali_pp_scheduler_disable_empty_virtual();
+ }
+
+ /* Get next virtual job. */
+ virtual_job = mali_pp_scheduler_get_virtual_group_job();
+ if (NULL != virtual_job && VIRTUAL_GROUP_IDLE == virtual_group_state) {
+ u32 first_subjob, last_subjob;
+			/* Mark the sub jobs this virtual group will run as started,
+			 * and remove the job from the queue if all of its sub jobs
+			 * are started in one go. */
+ mali_pp_scheduler_pick_virtual_group_job(virtual_job, &first_subjob, &last_subjob);
+
+ /* We no longer need the scheduler lock, but we still need the virtual lock
+ * in order to start the virtual job.
+ */
+ mali_pp_scheduler_unlock();
+
+ /* Start job. */
+ mali_group_start_job_on_virtual(group, virtual_job, first_subjob, last_subjob);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done).\n",
+ mali_pp_job_get_id(virtual_job), virtual_job, 1,
+ mali_pp_job_get_sub_job_count(virtual_job)));
+ } else {
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch("Mali_Virtual_PP", sched_clock(), 0, 0, 0);
+#endif
+
+ mali_pp_scheduler_unlock();
+ }
+
+ /* Releasing the virtual group lock that was held when entering the function. */
+ mali_group_unlock(group);
+
+ /* Start a physical job (if we acquired a physical group earlier). */
+ if (NULL != physical_job && NULL != physical_group) {
+ mali_group_lock(physical_group);
+
+ /* Change the group state from LEAVING_VIRTUAL to IDLE to complete the transition. */
+ physical_group->state = MALI_GROUP_STATE_IDLE;
+
+ /* Start job. */
+ mali_group_start_job_on_group(physical_group, physical_job, physical_sub_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
+ mali_pp_job_get_id(physical_job), physical_job, physical_sub_job + 1,
+ mali_pp_job_get_sub_job_count(physical_job)));
+
+ mali_group_unlock(physical_group);
+ }
+ } else {
+ /* Physical group. */
+ struct mali_pp_job *job = NULL;
+ u32 sub_job = 0;
+
+ job = mali_pp_scheduler_get_physical_job();
+ if (NULL != job) {
+ /* There is a runnable physical job. */
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+
+ /* Mark sub job as started. */
+ sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+ mali_pp_job_mark_sub_job_started(job, sub_job);
+
+ /* Remove job from queue (if this was the last sub job). */
+ mali_pp_scheduler_dequeue_physical_job(job);
+
+ mali_pp_scheduler_unlock();
+
+ /* Group is already on the working list, so start the new job. */
+ mali_group_start_job_on_group(group, job, sub_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
+ mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
+
+ mali_group_unlock(group);
+ } else {
+ mali_pp_scheduler_set_group_idle_and_unlock(group);
+ }
+ }
+}
+
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half)
+{
+ mali_bool job_is_done = MALI_FALSE;
+ mali_bool schedule_on_group = MALI_FALSE;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s).\n",
+ mali_pp_job_is_virtual_group_job(job) ? "Virtual Group" : "Physical",
+ mali_pp_job_get_id(job),
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ success ? "success" : "failure"));
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ mali_pp_scheduler_lock();
+
+ if (mali_group_is_virtual(group) && !mali_pp_job_is_with_dlbu(job)) {
+ u32 subjobs;
+
+		/* Get how many sub jobs are running in parallel in this virtual group */
+ subjobs = mali_pp_job_get_first_unstarted_sub_job(job) - group->pp_running_sub_job;
+ MALI_DEBUG_ASSERT(subjobs > 0);
+
+ for (; 0 < subjobs; subjobs--) {
+ mali_pp_job_mark_sub_job_completed(job, success);
+ }
+
+ mali_group_non_dlbu_job_done_virtual(group);
+ } else {
+ mali_pp_job_mark_sub_job_completed(job, success);
+ }
+
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job) == mali_group_is_virtual(group));
+
+ job_is_done = mali_pp_job_is_complete(job);
+
+ if (job_is_done) {
+		/* Groups added to the virtual group may have picked up sub jobs to
+		 * run without dequeuing the job, so do the necessary dequeue here
+		 * under the scheduler lock.
+		 */
+ if (mali_group_is_virtual(group) && !mali_pp_job_is_with_dlbu(job)) {
+ if (!_mali_osk_list_empty(&job->list)) {
+ mali_pp_scheduler_dequeue_virtual_group_job(job);
+ }
+ }
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+ /* Remove job from session list. */
+ _mali_osk_list_delinit(&job->session_list);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X).\n",
+ mali_pp_job_is_virtual_group_job(job) ? "virtual group" : "physical",
+ mali_pp_job_get_id(job), job));
+
+ mali_pp_scheduler_unlock();
+
+ /* Release tracker. If other trackers are waiting on this tracker, this could
+ * trigger activation. The returned scheduling mask can be used to determine if we
+ * have to schedule GP, PP or both. */
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+ mali_pp_scheduler_lock();
+ }
+
+ if (mali_group_is_virtual(group)) {
+		/* The virtual group has finished its job; mark it idle again. */
+ virtual_group_state = VIRTUAL_GROUP_IDLE;
+ }
+
+ /* If paused, then this was the last job, so wake up sleeping workers and return. */
+ if (pause_count > 0) {
+		/* Wake up sleeping workers. Their wake-up condition is that no group
+		 * is working while the scheduler is paused, so unless we are done
+		 * working, no threads will actually be woken up.
+		 */
+ if (!mali_group_is_virtual(group)) {
+ /* Move physical group to idle list. */
+ _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+ }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
+#endif
+
+ _mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
+
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(group);
+
+ if (job_is_done) {
+ /* Return job to user and delete it. */
+ mali_pp_scheduler_finalize_job(job, MALI_TRUE);
+ }
+
+ /* A GP job might be queued by tracker release above,
+ * make sure GP scheduler has a chance to schedule this (if possible)
+ */
+ mali_scheduler_schedule_from_mask(schedule_mask & ~MALI_SCHEDULER_MASK_PP, in_upper_half);
+
+ return;
+ }
+
+ /* Since this group just finished running a job, we can reschedule a new job on it
+ * immediately. */
+
+ /* By default, don't schedule on group. */
+ schedule_on_group = MALI_FALSE;
+
+ if (mali_group_is_virtual(group)) {
+ /* Always schedule immediately on virtual group. */
+ schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
+ schedule_on_group = MALI_TRUE;
+ } else if (0 < job_queue.depth && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP) || _mali_osk_list_empty(&group_list_idle))) {
+ struct mali_pp_job *next_job = NULL;
+
+ next_job = mali_pp_scheduler_get_physical_job();
+ MALI_DEBUG_ASSERT_POINTER(next_job);
+
+ /* If no new jobs have been queued or if this group is the only idle group, we can
+ * schedule immediately on this group, unless we are GP bound and the next job would
+ * benefit from all its sub jobs being started concurrently. */
+
+ if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) && mali_pp_job_is_large_and_unstarted(next_job)) {
+ /* We are GP bound and the job would benefit from all sub jobs being started
+ * concurrently. Postpone scheduling until after group has been unlocked. */
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
+ schedule_on_group = MALI_FALSE;
+ } else {
+ /* Schedule job immediately since we are not GP bound. */
+ schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
+ schedule_on_group = MALI_TRUE;
+ }
+ } else if ((0 < virtual_group_job_queue.depth) && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP))) {
+		/* This case is rare and only occurs when a virtual job has sub jobs.
+		 * The last "pilot job" here might not be a real pilot job; it may be
+		 * a regular PP job with only one sub job (i.e. only one region
+		 * inform), so it may not trigger any virtual job queued in the
+		 * virtual queue. That could suspend the PP scheduler even though
+		 * jobs are waiting in the virtual queue, so in this case we
+		 * explicitly set the schedule_mask. */
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
+ }
+
+ if (schedule_on_group) {
+ /* Schedule a new job on this group. */
+ mali_pp_scheduler_schedule_on_group_and_unlock(group);
+ } else {
+ /* Set group idle. Will rejoin virtual group, under appropriate conditions. */
+ mali_pp_scheduler_set_group_idle_and_unlock(group);
+ }
+
+ if (!schedule_on_group || MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
+ if (MALI_SCHEDULER_MASK_PP & schedule_mask) {
+ /* Schedule PP directly. */
+ mali_pp_scheduler_schedule();
+ schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
+ }
+
+ /* Schedule other jobs that were activated. */
+ mali_scheduler_schedule_from_mask(schedule_mask, in_upper_half);
+ }
+
+ if (job_is_done) {
+ /* Return job to user and delete it. */
+ mali_pp_scheduler_finalize_job(job, MALI_TRUE);
+ }
+}
+
+void mali_pp_scheduler_suspend(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_pp_scheduler_unlock();
+
+ /* Go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
+	 * mali_pp_scheduler_is_suspended() function will be called. This will return
+	 * true if no group is working and pause_count > 0, so if a core is still
+	 * active this will not do anything.
+ */
+ _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended, NULL);
+}
+
+void mali_pp_scheduler_resume(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ mali_pp_scheduler_unlock();
+ if (0 == pause_count) {
+ mali_pp_scheduler_schedule();
+ }
+}
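+
+/* Typical pairing (illustrative): callers that need the groups quiescent
+ * bracket their work with these two calls:
+ *
+ *   mali_pp_scheduler_suspend();   // blocks until no group is working
+ *   ... reconfigure groups safely ...
+ *   mali_pp_scheduler_resume();    // reschedules once pause_count hits 0
+ *
+ * mali_pp_scheduler_disable_group() below uses exactly this pattern.
+ */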
+
+static mali_timeline_point mali_pp_scheduler_submit_job(struct mali_session_data *session, struct mali_pp_job *job)
+{
+ mali_timeline_point point;
+ u32 fb_lookup_id = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_pp_scheduler_lock();
+
+ fb_lookup_id = mali_pp_job_get_fb_lookup_id(job);
+ MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
+
+ /* Adding job to the lookup list used to quickly discard writeback units of queued jobs. */
+ _mali_osk_list_addtail(&job->session_fb_lookup_list, &session->pp_job_fb_lookup_list[fb_lookup_id]);
+
+ mali_pp_scheduler_unlock();
+
+ /* We hold a PM reference for every job we hold queued (and running) */
+ _mali_osk_pm_dev_ref_add();
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_PP);
+
+ return point;
+}
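+
+/* The returned timeline point is what user space later waits on. A condensed
+ * view of the submit path (illustrative):
+ *
+ *   point = mali_pp_scheduler_submit_job(session, job);
+ *   put point into user memory;    // see _mali_ukk_pp_start_job() below
+ *
+ * After submission the job is owned by the Timeline system; it reaches the
+ * PP scheduler queues once the waits tracked by job->tracker have resolved.
+ */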
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ mali_timeline_point point;
+ u32 __user *timeline_point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)ctx;
+
+ job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ timeline_point_ptr = (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+
+ point = mali_pp_scheduler_submit_job(session, job);
+ job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
+ /* Let user space know that something failed after the job was started. */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ _mali_uk_pp_and_gp_start_job_s kargs;
+ struct mali_pp_job *pp_job;
+ struct mali_gp_job *gp_job;
+ u32 __user *timeline_point_ptr = NULL;
+ mali_timeline_point point;
+ _mali_uk_pp_start_job_s __user *pp_args;
+ _mali_uk_gp_start_job_s __user *gp_args;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+
+ session = (struct mali_session_data *) ctx;
+
+ if (0 != _mali_osk_copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+ gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+ pp_job = mali_pp_job_create(session, pp_args, mali_scheduler_get_new_id());
+ if (NULL == pp_job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_job = mali_gp_job_create(session, gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
+ if (NULL == gp_job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ mali_pp_job_delete(pp_job);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ timeline_point_ptr = (u32 __user *)(uintptr_t)pp_job->uargs.timeline_point_ptr;
+
+ /* Submit GP job. */
+ mali_gp_scheduler_submit_job(session, gp_job);
+ gp_job = NULL;
+
+ /* Submit PP job. */
+ point = mali_pp_scheduler_submit_job(session, pp_job);
+ pp_job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
+ /* Let user space know that something failed after the jobs were started. */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ args->number_of_total_cores = num_cores;
+ args->number_of_enabled_cores = enabled_cores;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 mali_pp_scheduler_get_num_cores_total(void)
+{
+ return num_cores;
+}
+
+u32 mali_pp_scheduler_get_num_cores_enabled(void)
+{
+ return enabled_cores;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ args->version = pp_version;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ u32 fb_lookup_id;
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+ mali_pp_scheduler_lock();
+
+	/* Iterate over all jobs for the given frame builder id. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pp_job_fb_lookup_list[fb_lookup_id], struct mali_pp_job, session_fb_lookup_list) {
+ MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+ if (mali_pp_job_get_frame_builder_id(job) == (u32) args->fb_id) {
+ MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+ if (args->wb0_memory == job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
+ MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+ mali_pp_job_disable_wb0(job);
+ }
+ if (args->wb1_memory == job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
+ MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+ mali_pp_job_disable_wb1(job);
+ }
+ if (args->wb2_memory == job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
+ MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+ mali_pp_job_disable_wb2(job);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", disable_mask));
+ } else {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+ }
+ }
+
+ mali_pp_scheduler_unlock();
+}
+
+void mali_pp_scheduler_abort_session(struct mali_session_data *session)
+{
+ u32 i = 0;
+ struct mali_pp_job *job, *tmp_job;
+ struct mali_group *group, *tmp_group;
+ struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08X.\n", session));
+
+ mali_pp_scheduler_lock();
+
+ /* Find all jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->pp_job_list, struct mali_pp_job, session_list) {
+ /* Remove job from queue. */
+ if (mali_pp_job_is_virtual_group_job(job)) {
+ if (mali_pp_job_has_unstarted_sub_jobs(job))
+ --virtual_group_job_queue.depth;
+ } else {
+ job_queue.depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
+ }
+
+ _mali_osk_list_delinit(&job->list);
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+
+ mali_pp_job_mark_unstarted_failed(job);
+
+ if (mali_pp_job_is_complete(job)) {
+ /* Job is complete, remove from session list. */
+ _mali_osk_list_delinit(&job->session_list);
+
+ /* Move job to local list for release and deletion. */
+ _mali_osk_list_add(&job->list, &removed_jobs);
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborted PP job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
+ } else {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Keeping partially started PP job %u (0x%08X) in session.\n", mali_pp_job_get_id(job), job));
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list) {
+ groups[i++] = group;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list) {
+ groups[i++] = group;
+ }
+
+ mali_pp_scheduler_unlock();
+
+ /* Release and delete all found jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
+ mali_timeline_tracker_release(&job->tracker);
+ mali_pp_job_delete(job);
+ mali_pp_scheduler_job_completed(MALI_TRUE);
+ }
+
+ /* Abort any running jobs from the session. */
+ while (i > 0) {
+ mali_group_abort_session(groups[--i], session);
+ }
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_abort_session(virtual_group, session);
+ }
+}
+
+static mali_bool mali_pp_scheduler_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_pp_scheduler_lock();
+
+ ret = pause_count > 0
+ && _mali_osk_list_empty(&group_list_working)
+ && VIRTUAL_GROUP_WORKING != virtual_group_state;
+
+ mali_pp_scheduler_unlock();
+
+ return ret;
+}
+
+struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void)
+{
+ if (mali_pp_scheduler_has_virtual_group()) {
+ return mali_group_get_pp_core(virtual_group);
+ } else {
+ return NULL;
+ }
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue.normal_pri) ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue.high_pri) ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ n += mali_group_dump_state(virtual_group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+ return n;
+}
+#endif
+
+/* This function is intended for power on reset of all cores.
+ * No locking is done for the list iteration, which can only be safe if the
+ * scheduler is paused and all cores idle. That is always the case on init and
+ * power on. */
+void mali_pp_scheduler_reset_all_groups(void)
+{
+ struct mali_group *group, *temp;
+ struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+ s32 i = 0;
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_lock(virtual_group);
+ mali_group_reset(virtual_group);
+ mali_group_unlock(virtual_group);
+ }
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+ mali_pp_scheduler_lock();
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
+ groups[i++] = group;
+ }
+ mali_pp_scheduler_unlock();
+
+ while (i > 0) {
+ group = groups[--i];
+
+ mali_group_lock(group);
+ mali_group_reset(group);
+ mali_group_unlock(group);
+ }
+}
+
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ struct mali_group *group, *temp;
+ struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+ s32 i = 0;
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_zap_session(virtual_group, session);
+ }
+
+ mali_pp_scheduler_lock();
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
+ groups[i++] = group;
+ }
+ mali_pp_scheduler_unlock();
+
+ while (i > 0) {
+ mali_group_zap_session(groups[--i], session);
+ }
+}
+
+/* A PM reference must be taken with _mali_osk_pm_dev_ref_add_no_power_on
+ * before calling this function, to prevent Mali from powering down while the
+ * HW is accessed.
+ */
+static void mali_pp_scheduler_enable_group_internal(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_group_lock(group);
+
+ if (MALI_GROUP_STATE_DISABLED != group->state) {
+ mali_group_unlock(group);
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already enabled.\n", group));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Enabling PP group %p.\n", group));
+
+ mali_pp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+ ++enabled_cores;
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_bool update_hw;
+
+ /* Add group to virtual group. */
+ _mali_osk_list_delinit(&(group->pp_scheduler_list));
+ group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(group);
+
+ mali_group_lock(virtual_group);
+
+ update_hw = mali_pm_is_power_on();
+ /* Get ref of group domain */
+ mali_group_get_pm_domain_ref(group);
+
+ MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
+ MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
+
+ if (update_hw) {
+ mali_group_lock(group);
+ mali_group_power_on_group(group);
+ mali_group_reset(group);
+ mali_group_unlock(group);
+ }
+
+ mali_pp_scheduler_enable_empty_virtual();
+ mali_group_add_group(virtual_group, group, update_hw);
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Added to virtual group.\n", group));
+
+ mali_group_unlock(virtual_group);
+ } else {
+ /* Get ref of group domain */
+ mali_group_get_pm_domain_ref(group);
+
+ MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
+ MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
+
+ /* Put group on idle list. */
+ if (mali_pm_is_power_on()) {
+ mali_group_power_on_group(group);
+ mali_group_reset(group);
+ }
+
+ _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Now on idle list.\n", group));
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(group);
+ }
+}
+
+void mali_pp_scheduler_enable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ _mali_osk_pm_dev_ref_add_no_power_on();
+
+ mali_pp_scheduler_enable_group_internal(group);
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+
+ /* Pick up any jobs that might have been queued if all PP groups were disabled. */
+ mali_pp_scheduler_schedule();
+}
+
+static void mali_pp_scheduler_disable_group_internal(struct mali_group *group)
+{
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_lock(virtual_group);
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+ if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
+ /* The group was in the process of being added to the virtual group. We
+ * only need to change the state to reverse this. */
+ group->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+ } else if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
+ /* Remove group from virtual group. The state of the group will be
+ * LEAVING_VIRTUAL and the group will not be on any scheduler list. */
+ mali_group_remove_group(virtual_group, group);
+
+ mali_pp_scheduler_disable_empty_virtual();
+ }
+
+ mali_group_unlock(virtual_group);
+ }
+
+ mali_group_lock(group);
+ mali_pp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ if (MALI_GROUP_STATE_DISABLED == group->state) {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already disabled.\n", group));
+ } else {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disabling PP group %p.\n", group));
+
+ --enabled_cores;
+ _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_disabled);
+ group->state = MALI_GROUP_STATE_DISABLED;
+
+ mali_group_power_off_group(group, MALI_TRUE);
+ mali_group_put_pm_domain_ref(group);
+ }
+
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(group);
+}
+
+void mali_pp_scheduler_disable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_pp_scheduler_suspend();
+
+ _mali_osk_pm_dev_ref_add_no_power_on();
+
+ mali_pp_scheduler_disable_group_internal(group);
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+
+ mali_pp_scheduler_resume();
+}
+
+static void mali_pp_scheduler_notify_core_change(u32 num_cores)
+{
+ mali_bool done = MALI_FALSE;
+
+ if (mali_is_mali450()) {
+ return;
+ }
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+		/* Pre-allocate the notification objects we need right now (the count might change once the lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+			/* There is probably no point in trying again; the system must be really low on memory and probably unusable now anyway */
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+ data->number_of_enabled_cores = num_cores;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+		/* The number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+			/* We have allocated enough notification objects for all current sessions */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
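+
+/* Allocation/retry pattern used above (illustrative): notifications are
+ * allocated without the session lock, then the session count is re-checked
+ * under the lock; if new sessions appeared in between, the loop frees the
+ * unused objects and retries:
+ *
+ *   do {
+ *       n = count_sessions();               // unlocked snapshot
+ *       bufs = alloc_notifications(n);
+ *       lock_sessions();
+ *       if (count_sessions() <= n) { send_all(bufs); done = true; }
+ *       unlock_sessions();
+ *       free_unused(bufs);
+ *   } while (!done);
+ */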
+
+static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
+{
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
+
+ _mali_osk_pm_dev_ref_add_no_power_on();
+ _mali_osk_pm_dev_barrier();
+
+ while (target_core_nr > enabled_cores) {
+		/*
+		 * If there are any cores which do not belong to any domain,
+		 * these will always be found at the head of the list and
+		 * we will thus enable them first.
+		 */
+
+ mali_pp_scheduler_lock();
+
+ if (!_mali_osk_list_empty(&group_list_disabled)) {
+ struct mali_group *group;
+
+ group = _MALI_OSK_LIST_ENTRY(group_list_disabled.next, struct mali_group, pp_scheduler_list);
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+
+ mali_pp_scheduler_unlock();
+
+ mali_pp_scheduler_enable_group_internal(group);
+ } else {
+ mali_pp_scheduler_unlock();
+ break; /* no more groups on disabled list */
+ }
+ }
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+
+ mali_pp_scheduler_schedule();
+}
+
+static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
+{
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
+
+ mali_pp_scheduler_suspend();
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+ _mali_osk_pm_dev_ref_add_no_power_on();
+
+ if (NULL != mali_pmu_get_global_pmu_core()) {
+ int i;
+
+ for (i = MALI_MAX_NUMBER_OF_DOMAINS - 1; i >= 0; i--) {
+ if (target_core_nr < enabled_cores) {
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && (NULL != domain->group_list)) {
+ struct mali_group *group;
+
+ MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) {
+					/* If the group contains a PP core */
+ if (NULL != mali_group_get_pp_core(group)) {
+ mali_pp_scheduler_disable_group_internal(group);
+ if (target_core_nr >= enabled_cores) {
+ break;
+ }
+ }
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ }
+
+	/*
+	 * If we did not find enough cores associated with a power domain,
+	 * we must also disable cores which we cannot power off with the PMU.
+	 * Start with physical groups used by the scheduler, then remove
+	 * physical groups from the virtual group if even more are needed.
+	 */
+
+ while (target_core_nr < enabled_cores) {
+ mali_pp_scheduler_lock();
+ if (!_mali_osk_list_empty(&group_list_idle)) {
+ struct mali_group *group;
+
+ group = _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_pp_scheduler_unlock();
+
+ mali_pp_scheduler_disable_group_internal(group);
+ } else {
+ mali_pp_scheduler_unlock();
+ break; /* No more physical groups */
+ }
+ }
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ while (target_core_nr < enabled_cores) {
+ mali_group_lock(virtual_group);
+ if (!_mali_osk_list_empty(&virtual_group->group_list)) {
+ struct mali_group *group;
+
+ group = _MALI_OSK_LIST_ENTRY(virtual_group->group_list.next, struct mali_group, group_list);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_group_unlock(virtual_group);
+
+ mali_pp_scheduler_disable_group_internal(group);
+ } else {
+ mali_group_unlock(virtual_group);
+ break; /* No more physical groups in virtual group */
+ }
+ }
+ }
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+
+ mali_pp_scheduler_resume();
+}
+
+int mali_pp_scheduler_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+ if (target_core_nr == enabled_cores) return 0;
+ if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
+ if (target_core_nr > num_cores) return -EINVAL;
+ if (0 == target_core_nr) return -EINVAL;
+
+ if (target_core_nr > enabled_cores) {
+ mali_pp_scheduler_core_scale_up(target_core_nr);
+ } else if (target_core_nr < enabled_cores) {
+ mali_pp_scheduler_core_scale_down(target_core_nr);
+ }
+
+ if (target_core_nr != enabled_cores) {
+ MALI_DEBUG_PRINT(2, ("Core scaling failed, target number: %d, actual number: %d\n", target_core_nr, enabled_cores));
+ }
+
+ mali_pp_scheduler_notify_core_change(enabled_cores);
+
+ return 0;
+}
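+
+/*
+ * Example (editor's sketch, not part of the driver): a platform DVFS
+ * governor would typically drive core scaling like this. The governor
+ * function and its trigger are assumptions; only
+ * mali_pp_scheduler_set_perf_level() is real driver API.
+ *
+ *   static void platform_scale_pp_cores(unsigned int wanted_cores)
+ *   {
+ *       int err = mali_pp_scheduler_set_perf_level(wanted_cores, MALI_FALSE);
+ *       if (-EPERM == err) {
+ *           // Core scaling has been disabled; pass MALI_TRUE to override.
+ *       } else if (-EINVAL == err) {
+ *           // Requested zero cores or more cores than physically present.
+ *       }
+ *   }
+ */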
+
+void mali_pp_scheduler_core_scaling_enable(void)
+{
+ /* Core scaling is enabled by default. */
+ core_scaling_enabled = MALI_TRUE;
+}
+
+void mali_pp_scheduler_core_scaling_disable(void)
+{
+ core_scaling_enabled = MALI_FALSE;
+}
+
+mali_bool mali_pp_scheduler_core_scaling_is_enabled(void)
+{
+ return core_scaling_enabled;
+}
+
+static void mali_pp_scheduler_job_queued(void)
+{
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the PP as busy from the time a PP job is queued.
+ * This is fine because we only lose the tiny idle gap between jobs; in
+ * return, the utilization code has less work to do (fewer locks taken).
+ */
+ mali_utilization_pp_start();
+ }
+}
+
+static void mali_pp_scheduler_job_completed(mali_bool job_started)
+{
+ /* Release the PM reference taken for the job in mali_pp_scheduler_submit_job(). */
+ _mali_osk_pm_dev_ref_dec();
+
+ if (mali_utilization_enabled() && job_started) {
+ mali_utilization_pp_end();
+ }
+}
+
+static void mali_pp_scheduler_abort_job_and_unlock_scheduler(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+ /* This job should not be on any lists. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
+
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+
+ mali_pp_scheduler_unlock();
+
+ /* Release tracker. */
+ mali_timeline_tracker_release(&job->tracker);
+}
+
+static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job)
+{
+ _mali_osk_list_t *queue = NULL;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ struct mali_pp_job *iter, *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->session);
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ mali_dma_buf_map_job(job);
+ }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+ mali_pp_scheduler_lock();
+
+ if (unlikely(job->session->is_aborting)) {
+ /* Before checking if the session is aborting, the scheduler must be locked. */
+ MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", mali_pp_job_get_id(job), job));
+
+ mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
+
+ /* Delete job. */
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ mali_pp_scheduler_deferred_job_delete(job);
+#else
+ mali_pp_job_delete(job);
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+ /* Release the PM reference taken for the job in
+ * mali_pp_scheduler_submit_job(). */
+ _mali_osk_pm_dev_ref_dec();
+
+ /* Since we are aborting we ignore the scheduler mask. */
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_pp_scheduler_job_queued();
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_pp_job_get_tid(job), mali_pp_job_get_id(job), "PP");
+#endif
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
+
+ job->cache_order = mali_scheduler_get_new_cache_order();
+
+ /* Determine which queue the job should be added to. */
+ if (mali_pp_job_is_virtual_group_job(job)) {
+ if (job->session->use_high_priority_job_queue) {
+ queue = &virtual_group_job_queue.high_pri;
+ } else {
+ queue = &virtual_group_job_queue.normal_pri;
+ }
+
+ virtual_group_job_queue.depth += 1;
+
+ /* Set schedule bitmask if the virtual group is idle. */
+ if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
+ }
+ } else {
+ if (job->session->use_high_priority_job_queue) {
+ queue = &job_queue.high_pri;
+ } else {
+ queue = &job_queue.normal_pri;
+ }
+
+ job_queue.depth += mali_pp_job_get_sub_job_count(job);
+
+ /* Set schedule bitmask if there are physical PP cores available, or if there is an
+ * idle virtual group. */
+ if (!_mali_osk_list_empty(&group_list_idle)
+ || (mali_pp_scheduler_has_virtual_group()
+ && (VIRTUAL_GROUP_IDLE == virtual_group_state))) {
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
+ }
+ }
+
+ /* Find position in queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_pp_job, list) {
+ if (mali_pp_job_should_start_after(job, iter)) {
+ break;
+ }
+ }
+
+ /* Add job to queue. */
+ _mali_osk_list_add(&job->list, &iter->list);
+
+ /* Add job to session list. */
+ _mali_osk_list_addtail(&job->session_list, &(job->session->pp_job_list));
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+ mali_pp_job_is_virtual_group_job(job) ? "Virtual Group" : "Physical",
+ mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
+
+ mali_pp_scheduler_unlock();
+
+ return schedule_mask;
+}
+
+mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
+
+ if (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & job->tracker.activation_error) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", mali_pp_job_get_id(job), job));
+
+ mali_pp_scheduler_lock();
+ mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
+
+ mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+ mali_pp_scheduler_finalize_job(job, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ /* PP job is ready to run, queue it. */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ mali_pp_scheduler_deferred_job_queue(job);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+ schedule_mask = mali_pp_scheduler_queue_job(job);
+
+ return schedule_mask;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_pp_scheduler.h b/drivers/gpu/mali/mali/common/mali_pp_scheduler.h
new file mode 100644
index 00000000..618152ab
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_pp_scheduler.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_SCHEDULER_H__
+#define __MALI_PP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "linux/mali/mali_utgard.h"
+
+/** Initialize the HW independent parts of the PP scheduler
+ */
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
+void mali_pp_scheduler_terminate(void);
+
+/** Populate the PP scheduler with groups
+ */
+void mali_pp_scheduler_populate(void);
+void mali_pp_scheduler_depopulate(void);
+
+/**
+ * @brief Handle job completion.
+ *
+ * Will attempt to start a new job on the locked group.
+ *
+ * If all sub jobs have completed, the job's tracker will be released and any other resources associated
+ * with the job will be freed. A notification will also be sent to user space.
+ *
+ * Releasing the tracker might activate other jobs, so if appropriate we also schedule them.
+ *
+ * @note Group must be locked when entering this function. Will be unlocked before exiting.
+ *
+ * @param group The group that completed the job.
+ * @param job The job that is done.
+ * @param sub_job Sub job of job.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @param in_upper_half MALI_TRUE if called from upper half, MALI_FALSE if not.
+ */
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half);
+
+void mali_pp_scheduler_suspend(void);
+void mali_pp_scheduler_resume(void);
+
+/**
+ * @brief Abort all running and queued PP jobs from session.
+ *
+ * This function aborts all PP jobs from the specified session. Queued jobs are removed from the
+ * queue and jobs currently running on a core will be aborted.
+ *
+ * @param session Session that is aborting.
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the PP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ *
+ * This function is intended for power on reset of all cores.
+ * No locking is done, which can only be safe if the scheduler is paused and
+ * all cores idle. That is always the case on init and power on.
+ */
+void mali_pp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session);
+
+/**
+ * @brief Get the virtual PP core
+ *
+ * The returned PP core may only be used to prepare DMA command buffers for the
+ * PP core. Other actions must go through the PP scheduler, or the virtual
+ * group.
+ *
+ * @return Pointer to the virtual PP core, NULL if this doesn't exist
+ */
+struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void);
+
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_pp_scheduler_enable_group(struct mali_group *group);
+void mali_pp_scheduler_disable_group(struct mali_group *group);
+
+/**
+ * @brief Used by the Timeline system to queue a PP job.
+ *
+ * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
+ * call.
+ */
+mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job);
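+
+/*
+ * Usage sketch (editor's illustration): the caller is expected to act on
+ * the returned bitmask, e.g.:
+ *
+ *   mali_scheduler_mask mask = mali_pp_scheduler_activate_job(job);
+ *   if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+ *       mali_scheduler_schedule_from_mask(mask, MALI_FALSE);
+ *   }
+ *
+ * Pass MALI_TRUE as the second argument to defer scheduling to a
+ * high-priority work queue instead of scheduling inline.
+ */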
+
+/**
+ * @brief Schedule queued jobs on idle cores.
+ */
+void mali_pp_scheduler_schedule(void);
+
+int mali_pp_scheduler_set_perf_level(u32 target_core_nr, mali_bool override);
+
+void mali_pp_scheduler_core_scaling_enable(void);
+void mali_pp_scheduler_core_scaling_disable(void);
+mali_bool mali_pp_scheduler_core_scaling_is_enabled(void);
+
+/**
+ * @brief Returns the total number of Pixel Processor cores in the system,
+ * irrespective of how many are currently enabled
+ *
+ * @return Number of physical Pixel Processor cores in the system
+ */
+u32 mali_pp_scheduler_get_num_cores_total(void);
+u32 mali_pp_scheduler_get_num_cores_enabled(void);
+
+#endif /* __MALI_PP_SCHEDULER_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_scheduler.c b/drivers/gpu/mali/mali/common/mali_scheduler.c
new file mode 100644
index 00000000..8940653d
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_scheduler.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_scheduler.h"
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+
+static _mali_osk_atomic_t mali_job_id_autonumber;
+static _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_high_pri = NULL;
+static _mali_osk_wq_work_t *gp_scheduler_wq_high_pri = NULL;
+
+static void mali_scheduler_wq_schedule_pp(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ mali_pp_scheduler_schedule();
+}
+
+static void mali_scheduler_wq_schedule_gp(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ mali_gp_scheduler_schedule();
+}
+
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+ if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_id_autonumber, 0)) {
+ MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) {
+ MALI_DEBUG_PRINT(1, ("Initialization of atomic job cache order counter failed.\n"));
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_pp, NULL);
+ if (NULL == pp_scheduler_wq_high_pri) {
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_gp, NULL);
+ if (NULL == gp_scheduler_wq_high_pri) {
+ _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_scheduler_terminate(void)
+{
+ _mali_osk_wq_delete_work(gp_scheduler_wq_high_pri);
+ _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+}
+
+u32 mali_scheduler_get_new_id(void)
+{
+ u32 job_id = _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+ return job_id;
+}
+
+u32 mali_scheduler_get_new_cache_order(void)
+{
+ u32 job_cache_order = _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
+ return job_cache_order;
+}
+
+void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+ if (MALI_SCHEDULER_MASK_GP & mask) {
+ /* GP needs scheduling. */
+ if (deferred_schedule) {
+ /* Schedule GP deferred. */
+ _mali_osk_wq_schedule_work_high_pri(gp_scheduler_wq_high_pri);
+ } else {
+ /* Schedule GP now. */
+ mali_gp_scheduler_schedule();
+ }
+ }
+
+ if (MALI_SCHEDULER_MASK_PP & mask) {
+ /* PP needs scheduling. */
+ if (deferred_schedule) {
+ /* Schedule PP deferred. */
+ _mali_osk_wq_schedule_work_high_pri(pp_scheduler_wq_high_pri);
+ } else {
+ /* Schedule PP now. */
+ mali_pp_scheduler_schedule();
+ }
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_scheduler.h b/drivers/gpu/mali/mali/common/mali_scheduler.h
new file mode 100644
index 00000000..73eb53e5
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_scheduler.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+u32 mali_scheduler_get_new_id(void);
+u32 mali_scheduler_get_new_cache_order(void);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by both the GP and PP schedulers.
+ * This must be called after the Mali HW has been powered on in order to reset
+ * the HW.
+ */
+MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+{
+ mali_gp_scheduler_reset_all_groups();
+ mali_pp_scheduler_reset_all_groups();
+}
+
+/**
+ * @brief Zap TLB on all active groups running \a session
+ *
+ * @param session Pointer to the session to zap
+ */
+MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ mali_gp_scheduler_zap_all_active(session);
+ mali_pp_scheduler_zap_all_active(session);
+}
+
+/**
+ * Check if bit is set in scheduler mask.
+ *
+ * @param mask Scheduler mask to check.
+ * @param bit Bit to check.
+ * @return MALI_TRUE if bit is set in scheduler mask, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_scheduler_mask_is_set(mali_scheduler_mask mask, mali_scheduler_mask bit)
+{
+ return MALI_SCHEDULER_MASK_EMPTY != (bit & mask);
+}
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
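+
+/*
+ * Example (editor's sketch): masks from several release points can be
+ * OR-ed together and acted on once. The gp_job/pp_job variables are
+ * hypothetical; mali_gp_scheduler_activate_job() is assumed to mirror
+ * the PP variant.
+ *
+ *   mali_scheduler_mask mask = MALI_SCHEDULER_MASK_EMPTY;
+ *   mask |= mali_gp_scheduler_activate_job(gp_job);
+ *   mask |= mali_pp_scheduler_activate_job(pp_job);
+ *   if (mali_scheduler_mask_is_set(mask, MALI_SCHEDULER_MASK_PP)) {
+ *       // The PP queue gained at least one runnable job.
+ *   }
+ *   mali_scheduler_schedule_from_mask(mask, MALI_TRUE); // defer to workqueue
+ */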
+
+/* Scheduler hint state, with helpers below to enable, disable and query hints. */
+extern mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+
+MALI_STATIC_INLINE void mali_scheduler_hint_enable(mali_scheduler_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+ mali_scheduler_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_scheduler_hint_disable(mali_scheduler_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+ mali_scheduler_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_scheduler_hint_is_enabled(mali_scheduler_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+ return mali_scheduler_hints[hint];
+}
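+
+/*
+ * Example (editor's sketch): how platform or utilization code might use the
+ * GP-bound hint. The detection itself is an assumption; only the accessors
+ * above are real API.
+ *
+ *   if (workload_is_gp_bound) // hypothetical detection
+ *       mali_scheduler_hint_enable(MALI_SCHEDULER_HINT_GP_BOUND);
+ *   else
+ *       mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+ */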
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_scheduler_types.h b/drivers/gpu/mali/mali/common/mali_scheduler_types.h
new file mode 100644
index 00000000..2d24a25c
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_scheduler_types.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_TYPES_H__
+#define __MALI_SCHEDULER_TYPES_H__
+
+#include "mali_osk.h"
+
+#define MALI_SCHEDULER_JOB_ID_SPAN 65535
+
+/**
+ * Bitmask used for deferred scheduling of subsystems.
+ */
+typedef u32 mali_scheduler_mask;
+
+#define MALI_SCHEDULER_MASK_GP (1<<0)
+#define MALI_SCHEDULER_MASK_PP (1<<1)
+
+#define MALI_SCHEDULER_MASK_EMPTY 0
+#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
+
+typedef enum {
+ MALI_SCHEDULER_HINT_GP_BOUND = 0
+#define MALI_SCHEDULER_HINT_MAX 1
+} mali_scheduler_hint;
+
+#endif /* __MALI_SCHEDULER_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_session.c b/drivers/gpu/mali/mali/common/mali_session.c
new file mode 100644
index 00000000..6e9c2c14
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_session.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+static u32 mali_session_count = 0;
+
+_mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+
+ mali_sessions_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SESSIONS);
+
+ if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_session_terminate(void)
+{
+ _mali_osk_spinlock_irq_term(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_count++;
+ mali_session_unlock();
+}
+
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_count--;
+ mali_session_unlock();
+}
+
+u32 mali_session_get_count(void)
+{
+ return mali_session_count;
+}
+
+/*
+ * Get the maximum number of completed window jobs across all active sessions;
+ * used when calculating window-render frames per second.
+ */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+u32 mali_session_max_window_num(void)
+{
+ struct mali_session_data *session, *tmp;
+ u32 max_window_num = 0;
+ u32 tmp_number = 0;
+
+ mali_session_lock();
+
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ tmp_number = _mali_osk_atomic_xchg(&session->number_of_window_jobs, 0);
+ if (max_window_num < tmp_number) {
+ max_window_num = tmp_number;
+ }
+ }
+
+ mali_session_unlock();
+
+ return max_window_num;
+}
+#endif
diff --git a/drivers/gpu/mali/mali/common/mali_session.h b/drivers/gpu/mali/mali/common/mali_session.h
new file mode 100644
index 00000000..d1a1a1b8
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_session.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+struct mali_timeline_system;
+struct mali_soft_system;
+
+/* Number of frame builder job lists per session. */
+#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16
+#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
+
+struct mali_session_data {
+ _mali_osk_notification_queue_t *ioctl_queue;
+
+ _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+ mali_descriptor_mapping *descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+ _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
+#endif
+
+ _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id. Used to link jobs from same frame builder. */
+
+ struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */
+ struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
+
+ mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
+ mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_session.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_sessions_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+u32 mali_session_get_count(void);
+
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
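+
+/*
+ * Example (editor's sketch): iterating all sessions must happen with the
+ * sessions lock held, since sessions may be added or removed concurrently.
+ * The loop body must not sleep (an IRQ spinlock is held).
+ *
+ *   struct mali_session_data *session, *tmp;
+ *
+ *   mali_session_lock();
+ *   MALI_SESSION_FOREACH(session, tmp, link) {
+ *       // Inspect or notify each session here.
+ *   }
+ *   mali_session_unlock();
+ */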
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+/*
+ * Get the maximum number of completed window jobs across all active sessions;
+ * used when calculating window-render frames per second.
+ */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+u32 mali_session_max_window_num(void);
+#endif
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_soft_job.c b/drivers/gpu/mali/mali/common/mali_soft_job.c
new file mode 100644
index 00000000..91dada9b
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_soft_job.c
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_soft_job.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_timeline.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_scheduler.h"
+
+MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ _mali_osk_spinlock_irq_lock(system->lock);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system));
+ MALI_DEBUG_ASSERT(0 == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid());
+}
+
+MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = 0);
+ _mali_osk_spinlock_irq_unlock(system->lock);
+}
+
+#if defined(DEBUG)
+MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+}
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system)
+#else
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system)
+#endif /* defined(DEBUG) */
+
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
+{
+ struct mali_soft_job_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
+ if (NULL == system) {
+ return NULL;
+ }
+
+ system->session = session;
+
+ system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == system->lock) {
+ mali_soft_job_system_destroy(system);
+ return NULL;
+ }
+ system->lock_owner = 0;
+ system->last_job_id = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
+
+ return system;
+}
+
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ /* All jobs should have been freed at this point. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used)));
+
+ if (NULL != system) {
+ if (NULL != system->lock) {
+ _mali_osk_spinlock_irq_term(system->lock);
+ }
+ _mali_osk_free(system);
+ }
+}
+
+static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+ MALI_DEBUG_ASSERT(system == job->system);
+
+ _mali_osk_list_del(&(job->system_list));
+
+ mali_soft_job_system_unlock(job->system);
+
+ _mali_osk_free(job);
+}
+
+MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ struct mali_soft_job *job, *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ if (job->id == job_id)
+ return job;
+ }
+
+ return NULL;
+}
+
+void mali_soft_job_destroy(struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job));
+
+ if (NULL != job) {
+ if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return;
+
+ _mali_osk_atomic_term(&job->refcount);
+
+ if (NULL != job->activated_notification) {
+ _mali_osk_notification_delete(job->activated_notification);
+ job->activated_notification = NULL;
+ }
+
+ mali_soft_job_system_free_job(job->system, job);
+ }
+}
+
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
+{
+ struct mali_soft_job *job;
+ _mali_osk_notification_t *notification = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
+ (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));
+
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+ if (unlikely(NULL == notification)) {
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+ return NULL;
+ }
+
+ job = _mali_osk_malloc(sizeof(struct mali_soft_job));
+ if (unlikely(NULL == job)) {
+ MALI_DEBUG_PRINT(2, ("Mali Soft Job: failed to allocate job.\n"));
+ /* Avoid leaking the pre-allocated notification on this error path. */
+ _mali_osk_notification_delete(notification);
+ return NULL;
+ }
+
+ mali_soft_job_system_lock(system);
+
+ job->system = system;
+ job->id = system->last_job_id++;
+ job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+ _mali_osk_list_add(&(job->system_list), &(system->jobs_used));
+
+ job->type = type;
+ job->user_job = user_job;
+ job->activated = MALI_FALSE;
+
+ job->activated_notification = notification;
+
+ _mali_osk_atomic_init(&job->refcount, 1);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ MALI_DEBUG_ASSERT(system == job->system);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+
+ mali_soft_job_system_unlock(system);
+
+ return job;
+}
+
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence)
+{
+ mali_timeline_point point;
+ struct mali_soft_job_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ system = job->system;
+
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system);
+
+ mali_soft_job_system_lock(system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ job->state = MALI_SOFT_JOB_STATE_STARTED;
+
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job));
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job);
+ point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT);
+
+ return point;
+}
+
+static mali_bool mali_soft_job_is_activated(void *data)
+{
+ struct mali_soft_job *job;
+
+ job = (struct mali_soft_job *) data;
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ return job->activated;
+}
+
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ struct mali_soft_job *job;
+ struct mali_timeline_system *timeline_system;
+ mali_scheduler_mask schedule_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(system);
+
+ job = mali_soft_job_system_lookup_job(system, job_id);
+
+ if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type)
+ || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+ mali_soft_job_system_unlock(system);
+ MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job));
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ /* Since the job is now in the signaled state, timeouts from the timeline system will be
+ * ignored, and it is not possible to signal this job again. */
+
+ timeline_system = system->session->timeline_system;
+ MALI_DEBUG_ASSERT_POINTER(timeline_system);
+
+ /* Wait until activated. */
+ _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
+{
+ if (NULL != job->activated_notification) {
+ _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer;
+ res->user_job = job->user_job;
+ mali_session_send_notification(job->system->session, job->activated_notification);
+ }
+ job->activated_notification = NULL;
+}
+
+void mali_soft_job_system_activate_job(struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));
+
+ mali_soft_job_system_unlock(job->system);
+
+ /* Since we are in shutdown, we can ignore the scheduling bitmask. */
+ mali_timeline_tracker_release(&job->tracker);
+ mali_soft_job_destroy(job);
+ return;
+ }
+
+ /* Send activated notification. */
+ mali_soft_job_send_activated_notification(job);
+
+ /* Wake up sleeping signaler. */
+ job->activated = MALI_TRUE;
+
+ /* If the job is self-signaled, release the tracker, destroy the job, and schedule at once. */
+ if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+ mali_scheduler_mask schedule_mask;
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+ mali_soft_job_destroy(job);
+ } else {
+ _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+ mali_soft_job_system_unlock(job->system);
+ }
+}
+
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ /* The session is aborting. This job will be released and destroyed by @ref
+ * mali_soft_job_system_abort(). */
+ mali_soft_job_system_unlock(job->system);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ if (MALI_SOFT_JOB_STATE_STARTED != job->state) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ /* The job is about to be signaled, ignore timeout. */
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job));
+ mali_soft_job_system_unlock(job->system);
+ return schedule_mask;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_TIMED_OUT;
+ _mali_osk_atomic_inc(&job->refcount);
+
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+ mali_soft_job_destroy(job);
+
+ return schedule_mask;
+}
+
+void mali_soft_job_system_abort(struct mali_soft_job_system *system)
+{
+ struct mali_soft_job *job, *tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session));
+
+ mali_soft_job_system_lock(system);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
+ /* If the job has been activated, we have to release the tracker and destroy
+ * the job. If not, the tracker will be released and the job destroyed when
+ * it is activated. */
+ if (MALI_TRUE == job->activated) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job));
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job));
+
+ /* We need to destroy this soft job. */
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ }
+
+ mali_soft_job_system_unlock(system);
+
+ /* Release and destroy jobs. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) {
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ /* Move job back to used list before destroying. */
+ _mali_osk_list_move(&job->system_list, &system->jobs_used);
+
+ mali_soft_job_destroy(job);
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_soft_job.h b/drivers/gpu/mali/mali/common/mali_soft_job.h
new file mode 100644
index 00000000..77711b1b
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_soft_job.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SOFT_JOB_H__
+#define __MALI_SOFT_JOB_H__
+
+#include "mali_osk.h"
+
+#include "mali_timeline.h"
+
+struct mali_timeline_fence;
+struct mali_session_data;
+struct mali_soft_job;
+struct mali_soft_job_system;
+
+/**
+ * Soft job types.
+ *
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either
+ * they are signaled by user-space (@ref mali_soft_job_system_signal_job) or if they are timed out
+ * by the Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will automatically release their job
+ * resources in the kernel when the job is activated.
+ */
+typedef enum mali_soft_job_type {
+ MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
+ MALI_SOFT_JOB_TYPE_USER_SIGNALED,
+} mali_soft_job_type;
+
+/**
+ * Soft job state.
+ *
+ * A job is first allocated with @ref mali_soft_job_create; its state is then set to
+ * MALI_SOFT_JOB_STATE_ALLOCATED.
+ * Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
+ * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
+ * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
+ *
+ * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
+ * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT. This can only happen to soft jobs in state
+ * MALI_SOFT_JOB_STATE_STARTED.
+ *
+ */
+typedef enum mali_soft_job_state {
+ MALI_SOFT_JOB_STATE_ALLOCATED,
+ MALI_SOFT_JOB_STATE_STARTED,
+ MALI_SOFT_JOB_STATE_SIGNALED,
+ MALI_SOFT_JOB_STATE_TIMED_OUT,
+} mali_soft_job_state;
+
+#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
+
+/**
+ * Soft job struct.
+ *
+ * A soft job can be used to represent any kind of CPU work done in kernel-space.
+ */
+typedef struct mali_soft_job {
+ mali_soft_job_type type; /**< Soft job type. Must be one of MALI_SOFT_JOB_TYPE_*. */
+ u64 user_job; /**< Identifier for soft job in user space. */
+ _mali_osk_atomic_t refcount; /**< Soft jobs are reference counted to prevent premature deletion. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for soft job. */
+ mali_bool activated; /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
+ _mali_osk_notification_t *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */
+
+ /* Protected by soft job system lock. */
+ u32 id; /**< Used by user-space to find corresponding soft job in kernel-space. */
+ mali_soft_job_state state; /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
+ struct mali_soft_job_system *system; /**< The soft job system this job is in. */
+ _mali_osk_list_t system_list; /**< List element used by soft job system. */
+} mali_soft_job;
+
+/**
+ * Per-session soft job system.
+ *
+ * The soft job system is used to manage all soft jobs that belong to a session.
+ */
+typedef struct mali_soft_job_system {
+ struct mali_session_data *session; /**< The session this soft job system belongs to. */
+ _MALI_OSK_LIST_HEAD(jobs_used); /**< List of all allocated soft jobs. */
+
+ _mali_osk_spinlock_irq_t *lock; /**< Lock used to protect soft job system and its soft jobs. */
+ u32 lock_owner; /**< Contains tid of thread that locked the system or 0, if not locked. */
+ u32 last_job_id; /**< Last allocated job id; protected by the system lock. */
+} mali_soft_job_system;
+
+/**
+ * Create a soft job system.
+ *
+ * @param session The session this soft job system will belong to.
+ * @return The new soft job system, or NULL if unsuccessful.
+ */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);
+
+/**
+ * Destroy a soft job system.
+ *
+ * @note The soft job system must not have any started or activated jobs. Call @ref
+ * mali_soft_job_system_abort first.
+ *
+ * @param system The soft job system we are destroying.
+ */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
+
+/**
+ * Create a soft job.
+ *
+ * @param system Soft job system to create soft job from.
+ * @param type Type of the soft job.
+ * @param user_job Identifier for soft job in user space.
+ * @return New soft job if successful, NULL if not.
+ */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
+
+/**
+ * Destroy soft job.
+ *
+ * @param job Soft job to destroy.
+ */
+void mali_soft_job_destroy(struct mali_soft_job *job);
+
+/**
+ * Start a soft job.
+ *
+ * The soft job will be added to the Timeline system which will then activate it after all
+ * dependencies have been resolved.
+ *
+ * Create soft jobs with @ref mali_soft_job_create before starting them.
+ *
+ * @param job Soft job to start.
+ * @param fence Fence representing dependencies for this soft job.
+ * @return Point on soft job timeline.
+ */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);
+
+/**
+ * Used by user-space to signal that a soft job has completed.
+ *
+ * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ *
+ * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
+ *
+ * @note If the soft job was signaled successfully, or it received a time out, the soft job will be
+ * destroyed after this call and should no longer be used.
+ *
+ * @note This function will block until the soft job has been activated.
+ *
+ * @param system The soft job system the job was started in.
+ * @param job_id ID of soft job we are signaling.
+ *
+ * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
+ * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
+ */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);
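+
+/*
+ * Lifecycle sketch (editor's illustration) for a user-signaled soft job,
+ * as it might be driven from the ioctl layer. Error handling and the
+ * fence setup are elided; job_id is assumed to have been returned to
+ * user-space after creation.
+ *
+ *   struct mali_soft_job *job;
+ *   mali_timeline_point point;
+ *
+ *   job = mali_soft_job_create(system, MALI_SOFT_JOB_TYPE_USER_SIGNALED, user_job);
+ *   point = mali_soft_job_start(job, &fence);        // ALLOCATED -> STARTED
+ *   // ... later, from user-space via ioctl:
+ *   mali_soft_job_system_signal_job(system, job_id); // STARTED -> SIGNALED,
+ *                                                    // blocks until activated
+ */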
+
+/**
+ * Used by the Timeline system to activate a soft job.
+ *
+ * @param job The soft job that is being activated.
+ */
+void mali_soft_job_system_activate_job(struct mali_soft_job *job);
+
+/**
+ * Used by the Timeline system to timeout a soft job.
+ *
+ * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
+ * activation.
+ *
+ * @param job The soft job that is being timed out.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);
+
+/**
+ * Used to cleanup activated soft jobs in the soft job system on session abort.
+ *
+ * @param system The soft job system that is being aborted.
+ */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system);
+
+#endif /* __MALI_SOFT_JOB_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_spinlock_reentrant.c b/drivers/gpu/mali/mali/common/mali_spinlock_reentrant.c
new file mode 100644
index 00000000..de076abf
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_spinlock_reentrant.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_spinlock_reentrant.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order)
+{
+ struct mali_spinlock_reentrant *spinlock;
+
+ spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant));
+ if (NULL == spinlock) {
+ return NULL;
+ }
+
+ spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order);
+ if (NULL == spinlock->lock) {
+ mali_spinlock_reentrant_term(spinlock);
+ return NULL;
+ }
+
+ return spinlock;
+}
+
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner);
+
+ if (NULL != spinlock->lock) {
+ _mali_osk_spinlock_irq_term(spinlock->lock);
+ }
+
+ _mali_osk_free(spinlock);
+}
+
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid);
+
+ MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__));
+
+ if (tid != spinlock->owner) {
+ _mali_osk_spinlock_irq_lock(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter);
+ spinlock->owner = tid;
+ }
+
+ MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__));
+
+ ++spinlock->counter;
+}
+
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner);
+
+ --spinlock->counter;
+ if (0 == spinlock->counter) {
+ spinlock->owner = 0;
+ MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__));
+ _mali_osk_spinlock_irq_unlock(spinlock->lock);
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_spinlock_reentrant.h b/drivers/gpu/mali/mali/common/mali_spinlock_reentrant.h
new file mode 100644
index 00000000..f252ab46
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_spinlock_reentrant.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SPINLOCK_REENTRANT_H__
+#define __MALI_SPINLOCK_REENTRANT_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * Reentrant spinlock.
+ */
+struct mali_spinlock_reentrant {
+ _mali_osk_spinlock_irq_t *lock;
+ u32 owner;
+ u32 counter;
+};
+
+/**
+ * Create a new reentrant spinlock.
+ *
+ * @param lock_order Lock order.
+ * @return New reentrant spinlock.
+ */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);
+
+/**
+ * Terminate reentrant spinlock and free any associated resources.
+ *
+ * @param spinlock Reentrant spinlock to terminate.
+ */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);
+
+/**
+ * Wait for reentrant spinlock to be signaled.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Signal reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Check if thread is holding reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ return (tid == spinlock->owner && 0 < spinlock->counter);
+}
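+
+/*
+ * Example (editor's sketch): the lock may be re-taken by the thread that
+ * already owns it; every wait must be paired with a signal.
+ *
+ *   u32 tid = _mali_osk_get_tid();
+ *
+ *   mali_spinlock_reentrant_wait(lock, tid);   // counter 0 -> 1, takes lock
+ *   mali_spinlock_reentrant_wait(lock, tid);   // same owner, counter 1 -> 2
+ *   MALI_DEBUG_ASSERT(mali_spinlock_reentrant_is_held(lock, tid));
+ *   mali_spinlock_reentrant_signal(lock, tid); // counter 2 -> 1
+ *   mali_spinlock_reentrant_signal(lock, tid); // counter 1 -> 0, releases lock
+ */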
+
+#endif /* __MALI_SPINLOCK_REENTRANT_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_timeline.c b/drivers/gpu/mali/mali/common/mali_timeline.c
new file mode 100644
index 00000000..21fec003
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_timeline.c
@@ -0,0 +1,1452 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+#include "mali_scheduler.h"
+#include "mali_soft_job.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *waiter);
+
+#if defined(CONFIG_SYNC)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
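+/*
+ * Editor's note: the sync fence callback below can run in atomic context,
+ * and on kernels older than 3.5 sync_fence_put() is presumably not safe to
+ * call there. Fences are therefore parked on a list and released from
+ * delayed work instead.
+ */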
+struct mali_deferred_fence_put_entry {
+ struct hlist_node list;
+ struct sync_fence *fence;
+};
+
+static HLIST_HEAD(mali_timeline_sync_fence_to_free_list);
+static DEFINE_SPINLOCK(mali_timeline_sync_fence_to_free_lock);
+
+static void put_sync_fences(struct work_struct *ignore)
+{
+ struct hlist_head list;
+ struct hlist_node *tmp, *pos;
+ unsigned long flags;
+ struct mali_deferred_fence_put_entry *o;
+
+ spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+ hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
+ spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+ hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
+ sync_fence_put(o->fence);
+ kfree(o);
+ }
+}
+
+static DECLARE_DELAYED_WORK(delayed_sync_fence_put, put_sync_fences);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+/* Callback invoked when a sync fence that a tracker is waiting on is signaled. */
+static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_waiter *waiter;
+ struct mali_timeline_tracker *tracker;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool is_aborting = MALI_FALSE;
+ int fence_status = sync_fence->status;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
+
+ tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ is_aborting = system->session->is_aborting;
+ if (!is_aborting && (0 > fence_status)) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ }
+
+ waiter = tracker->waiter_sync;
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ tracker->sync_fence = NULL;
+ schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+ /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+ if (is_aborting) {
+ _mali_osk_wait_queue_wake_up(system->wait_queue);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+ {
+ struct mali_deferred_fence_put_entry *obj;
+
+ obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
+ if (obj) {
+ unsigned long flags;
+ mali_bool schedule = MALI_FALSE;
+
+ obj->fence = sync_fence;
+
+ spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+ if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
+ schedule = MALI_TRUE;
+ hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
+ spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+ if (schedule)
+ schedule_delayed_work(&delayed_sync_fence_put, 0);
+ }
+ }
+#else
+ sync_fence_put(sync_fence);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+ if (!is_aborting) {
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_TRUE);
+ }
+}
+#endif /* defined(CONFIG_SYNC) */
+
+static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type);
+
+ return mali_soft_job_system_timeout_job((struct mali_soft_job *) tracker->job);
+}
+
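+/* Delayed-work callback used to time out the oldest soft job tracker on a
+ * timeline. Only the soft timeline arms this timer (see
+ * mali_timeline_update_delayed_work()). */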
+static void mali_timeline_timer_callback(void *data)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker;
+ struct mali_timeline *timeline;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ timeline = (struct mali_timeline *) data;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ if (!system->timer_enabled) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ tracker = timeline->tracker_tail;
+ timeline->timer_active = MALI_FALSE;
+
+ if (NULL != tracker && MALI_TRUE == tracker->timer_active) {
+ /* This is likely delayed work that was already scheduled when it was cancelled; if the timeout has not yet expired, ignore this stale callback. */
+ if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ schedule_mask = mali_timeline_tracker_time_out(tracker);
+ tracker->timer_active = MALI_FALSE;
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n"));
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
+{
+ u32 i;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ system->timer_enabled = MALI_FALSE;
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+ }
+}
+
+static void mali_timeline_destroy(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ if (NULL != timeline) {
+ /* Assert that the timeline object has been properly cleaned up before destroying it. */
+ MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+ MALI_DEBUG_ASSERT(NULL != timeline->system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id);
+
+#if defined(CONFIG_SYNC)
+ if (NULL != timeline->sync_tl) {
+ sync_timeline_destroy(timeline->sync_tl);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
+ }
+
+ _mali_osk_free(timeline);
+ }
+}
+
+static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id)
+{
+ struct mali_timeline *timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX);
+
+ timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline));
+ if (NULL == timeline) {
+ return NULL;
+ }
+
+ /* Initially the timeline is empty. */
+#if defined(MALI_TIMELINE_DEBUG_START_POINT)
+ /* Start the timeline a bit before wrapping when debugging. */
+ timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128;
+#else
+ timeline->point_next = 1;
+#endif
+ timeline->point_oldest = timeline->point_next;
+
+ /* The tracker and waiter lists will initially be empty. */
+
+ timeline->system = system;
+ timeline->id = id;
+
+ timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline);
+ if (NULL == timeline->delayed_work) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->timer_active = MALI_FALSE;
+
+#if defined(CONFIG_SYNC)
+ {
+ char timeline_name[32];
+
+ switch (id) {
+ case MALI_TIMELINE_GP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_PP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_SOFT:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid());
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id));
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->sync_tl = mali_sync_timeline_create(timeline_name);
+ if (NULL == timeline->sync_tl) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ return timeline;
+}
+
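+/* Assign the tracker the timeline's next available point and add it as the
+ * new head of the timeline's tracker list. If the timeline is full, the
+ * tracker is not added and its point is set to MALI_TIMELINE_NO_POINT. */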
+static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ if (mali_timeline_is_full(timeline)) {
+ /* Don't add tracker if timeline is full. */
+ tracker->point = MALI_TIMELINE_NO_POINT;
+ return;
+ }
+
+ tracker->timeline = timeline;
+ tracker->point = timeline->point_next;
+
+ /* Find next available point. */
+ timeline->point_next++;
+ if (MALI_TIMELINE_NO_POINT == timeline->point_next) {
+ timeline->point_next++;
+ }
+
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+
+ /* Add tracker as new head on timeline's tracker list. */
+ if (NULL == timeline->tracker_head) {
+ /* Tracker list is empty. */
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+
+ timeline->tracker_tail = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev);
+ } else {
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+
+ tracker->timeline_prev = timeline->tracker_head;
+ timeline->tracker_head->timeline_next = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ }
+ timeline->tracker_head = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev);
+}
+
+/* Insert the waiter object into the given timeline's sorted waiter list. */
+static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new)
+{
+ struct mali_timeline_waiter *waiter_prev;
+ struct mali_timeline_waiter *waiter_next;
+
+ /* The waiter's point must lie between the timeline's oldest and next
+ * points, and it must be less than MALI_TIMELINE_MAX_POINT_SPAN away from either end. */
+ MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
+ MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN);
+
+ /* Find out where to put this waiter in the timeline's linked list of waiters. */
+ waiter_prev = timeline->waiter_head; /* Insert new after waiter_prev */
+ waiter_next = NULL; /* Insert new before waiter_next */
+
+ /* Iterating backwards from head (newest) to tail (oldest) until we
+ * find the correct spot to insert the new waiter */
+ while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) {
+ waiter_next = waiter_prev;
+ waiter_prev = waiter_prev->timeline_prev;
+ }
+
+ if (NULL == waiter_prev && NULL == waiter_next) {
+ /* list is empty */
+ timeline->waiter_head = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else if (NULL == waiter_next) {
+ /* insert at head */
+ waiter_new->timeline_prev = timeline->waiter_head;
+ timeline->waiter_head->timeline_next = waiter_new;
+ timeline->waiter_head = waiter_new;
+ } else if (NULL == waiter_prev) {
+ /* insert at tail */
+ waiter_new->timeline_next = timeline->waiter_tail;
+ timeline->waiter_tail->timeline_prev = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else {
+ /* insert between */
+ waiter_new->timeline_next = waiter_next;
+ waiter_new->timeline_prev = waiter_prev;
+ waiter_next->timeline_prev = waiter_new;
+ waiter_prev->timeline_next = waiter_new;
+ }
+}
+
+static void mali_timeline_update_delayed_work(struct mali_timeline *timeline)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *oldest_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Timer is disabled, early out. */
+ if (!system->timer_enabled) return;
+
+ oldest_tracker = timeline->tracker_tail;
+ if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) {
+ if (MALI_FALSE == oldest_tracker->timer_active) {
+ if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ }
+ _mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ);
+ oldest_tracker->timer_active = MALI_TRUE;
+ timeline->timer_active = MALI_TRUE;
+ }
+ } else if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+}
+
+static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ MALI_DEBUG_CODE({
+ struct mali_timeline_system *system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ });
+
+ if (NULL != timeline->tracker_tail) {
+ /* Set oldest point to oldest tracker's point */
+ timeline->point_oldest = timeline->tracker_tail->point;
+ } else {
+ /* No trackers, mark point list as empty */
+ timeline->point_oldest = timeline->point_next;
+ }
+
+ /* Release all waiters no longer on the timeline's point list.
+ * Releasing a waiter can trigger this function to be called again, so
+ * we do not store any pointers on the stack. */
+ while (NULL != timeline->waiter_tail) {
+ u32 waiter_time_relative;
+ u32 time_head_relative;
+ struct mali_timeline_waiter *waiter = timeline->waiter_tail;
+
+ time_head_relative = timeline->point_next - timeline->point_oldest;
+ waiter_time_relative = waiter->point - timeline->point_oldest;
+
+ if (waiter_time_relative < time_head_relative) {
+ /* This and all following waiters are on the point list, so we are done. */
+ break;
+ }
+
+ /* Remove waiter from timeline's waiter list. */
+ if (NULL != waiter->timeline_next) {
+ waiter->timeline_next->timeline_prev = NULL;
+ } else {
+ /* This was the last waiter */
+ timeline->waiter_head = NULL;
+ }
+ timeline->waiter_tail = waiter->timeline_next;
+
+ /* Release waiter. This could activate a tracker, if this was
+ * the last waiter for the tracker. */
+ schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+ }
+
+ return schedule_mask;
+}
+
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type);
+
+ /* Zero out all tracker members. */
+ _mali_osk_memset(tracker, 0, sizeof(*tracker));
+
+ tracker->type = type;
+ tracker->job = job;
+ tracker->trigger_ref_count = 1; /* Prevents any callback from triggering while the tracker is being added */
+ tracker->os_tick_create = _mali_osk_time_tickcount();
+ MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC);
+
+ tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE;
+
+ /* Copy fence. */
+ if (NULL != fence) {
+ _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence));
+ }
+}
+
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker)
+{
+ struct mali_timeline *timeline;
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker_next, *tracker_prev;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ /* Upon entry a group lock will be held, but not a scheduler lock. */
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ /* Tracker should have been triggered */
+ MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count);
+
+ /* All waiters should have been released at this point */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job));
+
+ timeline = tracker->timeline;
+ if (NULL == timeline) {
+ /* Tracker was not on a timeline, there is nothing to release. */
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Tracker should still be on timeline */
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point));
+
+ /* Tracker is no longer valid. */
+ MALI_DEBUG_CODE(tracker->magic = 0);
+
+ tracker_next = tracker->timeline_next;
+ tracker_prev = tracker->timeline_prev;
+ tracker->timeline_next = NULL;
+ tracker->timeline_prev = NULL;
+
+ /* Removing tracker from timeline's tracker list */
+ if (NULL == tracker_next) {
+ /* This tracker was the head */
+ timeline->tracker_head = tracker_prev;
+ } else {
+ tracker_next->timeline_prev = tracker_prev;
+ }
+
+ if (NULL == tracker_prev) {
+ /* This tracker was the tail */
+ timeline->tracker_tail = tracker_next;
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ /* Update the timeline's oldest time and release any waiters */
+ schedule_mask |= mali_timeline_update_oldest_point(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ } else {
+ tracker_prev->timeline_next = tracker_next;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Update delayed work only when it is the soft job timeline */
+ if (MALI_TIMELINE_SOFT == tracker->timeline->id) {
+ mali_timeline_update_delayed_work(tracker->timeline);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return schedule_mask;
+}
+
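+/**
+ * Put a list of waiters back on the system's empty list for reuse.
+ *
+ * The list must be linked from tail to head through the tracker_next field,
+ * and the system spinlock must be held by the caller.
+ */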
+void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *tail,
+ struct mali_timeline_waiter *head)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(head);
+ MALI_DEBUG_ASSERT_POINTER(tail);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ head->tracker_next = system->waiter_empty_list;
+ system->waiter_empty_list = tail;
+}
+
+static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ struct mali_timeline_system *system;
+ struct mali_timeline *timeline;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ tracker->os_tick_activate = _mali_osk_time_tickcount();
+
+ if (NULL != tracker->waiter_head) {
+ mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head);
+ tracker->waiter_head = NULL;
+ tracker->waiter_tail = NULL;
+ }
+
+ switch (tracker->type) {
+ case MALI_TIMELINE_TRACKER_GP:
+ schedule_mask = mali_gp_scheduler_activate_job((struct mali_gp_job *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_PP:
+ schedule_mask = mali_pp_scheduler_activate_job((struct mali_pp_job *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SOFT:
+ timeline = tracker->timeline;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+
+ /* Start a soft timer to make sure the soft job is released within a limited time */
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_update_delayed_work(timeline);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ break;
+ case MALI_TIMELINE_TRACKER_WAIT:
+ mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SYNC:
+#if defined(CONFIG_SYNC)
+ mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job);
+#else
+ MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n", tracker->type));
+#endif /* defined(CONFIG_SYNC) */
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type));
+ break;
+ }
+
+ return schedule_mask;
+}
+
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker)
+{
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->trigger_ref_count++;
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error)
+{
+ u32 tid = _mali_osk_get_tid();
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->trigger_ref_count--;
+
+ tracker->activation_error |= activation_error;
+
+ if (0 == tracker->trigger_ref_count) {
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ tracker = NULL;
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return schedule_mask;
+}
+
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_DEBUG_ASSERT_POINTER(uk_fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ fence->points[i] = uk_fence->points[i];
+ }
+
+ fence->sync_fd = uk_fence->sync_fd;
+}
+
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session)
+{
+ u32 i;
+ struct mali_timeline_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n"));
+
+ system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system));
+ if (NULL == system) {
+ return NULL;
+ }
+
+ system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+ if (NULL == system->spinlock) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i);
+ if (NULL == system->timelines[i]) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+ }
+
+#if defined(CONFIG_SYNC)
+ system->signaled_sync_tl = mali_sync_timeline_create("mali-always-signaled");
+ if (NULL == system->signaled_sync_tl) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ system->waiter_empty_list = NULL;
+ system->session = session;
+ system->timer_enabled = MALI_TRUE;
+
+ system->wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == system->wait_queue) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+
+ return system;
+}
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Check if there are any trackers left on timeline.
+ *
+ * Used as a wait queue conditional.
+ *
+ * @param data Timeline.
+ * @return MALI_TRUE if there are no trackers on timeline, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_has_no_trackers(void *data)
+{
+ struct mali_timeline *timeline = (struct mali_timeline *) data;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ return mali_timeline_is_empty(timeline);
+}
+
+/**
+ * Cancel sync fence waiters waited upon by trackers on all timelines.
+ *
+ * Will return after all timelines have no trackers left.
+ *
+ * @param system Timeline system.
+ */
+static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system)
+{
+ u32 i;
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_tracker *tracker, *tracker_next;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Cancel sync fence waiters. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker_next = timeline->tracker_tail;
+ while (NULL != tracker_next) {
+ tracker = tracker_next;
+ tracker_next = tracker->timeline_next;
+
+ if (NULL == tracker->sync_fence) continue;
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker));
+
+ /* Cancel sync fence waiter. */
+ if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+ /* Callback was not called, move tracker to local list. */
+ _mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list);
+ }
+ }
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ /* Manually call sync fence callback in order to release waiter and trigger activation of tracker. */
+ _MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) {
+ mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter);
+ }
+
+ /* Sleep until all sync fence callbacks are done and all timelines are empty. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+ }
+}
+
+#endif /* defined(CONFIG_SYNC) */
+
+void mali_timeline_system_abort(struct mali_timeline_system *system)
+{
+ MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session));
+
+#if defined(CONFIG_SYNC)
+ mali_timeline_cancel_sync_fence_waiters(system);
+#endif /* defined(CONFIG_SYNC) */
+
+ /* Should not be any waiters or trackers left at this point. */
+ MALI_DEBUG_CODE({
+ u32 i;
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i)
+ {
+ struct mali_timeline *timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+ }
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ });
+}
+
+void mali_timeline_system_destroy(struct mali_timeline_system *system)
+{
+ u32 i;
+ struct mali_timeline_waiter *waiter, *next;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n"));
+
+ if (NULL != system) {
+ /* There should be no waiters left on this queue. */
+ if (NULL != system->wait_queue) {
+ _mali_osk_wait_queue_term(system->wait_queue);
+ system->wait_queue = NULL;
+ }
+
+ /* Free all waiters in empty list */
+ waiter = system->waiter_empty_list;
+ while (NULL != waiter) {
+ next = waiter->tracker_next;
+ _mali_osk_free(waiter);
+ waiter = next;
+ }
+
+#if defined(CONFIG_SYNC)
+ if (NULL != system->signaled_sync_tl) {
+ sync_timeline_destroy(system->signaled_sync_tl);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if (NULL != system->timelines[i]) {
+ mali_timeline_destroy(system->timelines[i]);
+ }
+ }
+ if (NULL != system->spinlock) {
+ mali_spinlock_reentrant_term(system->spinlock);
+ }
+
+ _mali_osk_free(system);
+ }
+}
+
+/**
+ * Find how many waiters are needed for a given fence.
+ *
+ * @param fence The fence to check.
+ * @return Number of waiters needed for fence.
+ */
+static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence)
+{
+ u32 i, num_waiters = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if (MALI_TIMELINE_NO_POINT != fence->points[i]) {
+ ++num_waiters;
+ }
+ }
+
+#if defined(CONFIG_SYNC)
+ if (-1 != fence->sync_fd) ++num_waiters;
+#endif /* defined(CONFIG_SYNC) */
+
+ return num_waiters;
+}
+
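+/**
+ * Get a zeroed waiter object from the system's empty list, if available.
+ *
+ * The system spinlock must be held by the caller.
+ *
+ * @param system Timeline system.
+ * @return Zeroed waiter object, or NULL if the empty list is empty.
+ */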
+static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system)
+{
+ struct mali_timeline_waiter *waiter;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ waiter = system->waiter_empty_list;
+ if (NULL != waiter) {
+ /* Remove waiter from empty list and zero it */
+ system->waiter_empty_list = waiter->tracker_next;
+ _mali_osk_memset(waiter, 0, sizeof(*waiter));
+ }
+
+ /* Return NULL if list was empty. */
+ return waiter;
+}
+
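+/**
+ * Allocate a list of waiter objects linked through the tracker_next field,
+ * reusing zeroed waiters from the system's empty list when available.
+ *
+ * The system spinlock must be held on entry and is held again on return, but
+ * it is temporarily released if the empty list runs dry and waiters have to
+ * be allocated from the OS, since that allocation may sleep.
+ *
+ * @param system Timeline system.
+ * @param tail Returns the first waiter in the allocated list.
+ * @param head Returns the last waiter in the allocated list.
+ * @param max_num_waiters Maximum number of waiters to allocate.
+ */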
+static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
+ struct mali_timeline_waiter **tail,
+ struct mali_timeline_waiter **head,
+ int max_num_waiters)
+{
+ u32 i, tid = _mali_osk_get_tid();
+ mali_bool do_alloc;
+ struct mali_timeline_waiter *waiter;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(tail);
+ MALI_DEBUG_ASSERT_POINTER(head);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ *head = *tail = NULL;
+ do_alloc = MALI_FALSE;
+ i = 0;
+ while (i < max_num_waiters) {
+ if (MALI_FALSE == do_alloc) {
+ waiter = mali_timeline_system_get_zeroed_waiter(system);
+ if (NULL == waiter) {
+ do_alloc = MALI_TRUE;
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ continue;
+ }
+ } else {
+ waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter));
+ if (NULL == waiter) break;
+ }
+ ++i;
+ if (NULL == *tail) {
+ *tail = waiter;
+ *head = waiter;
+ } else {
+ (*head)->tracker_next = waiter;
+ *head = waiter;
+ }
+ }
+ if (MALI_TRUE == do_alloc) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ }
+}
+
+/**
+ * Create waiters for the given tracker. The tracker is activated when all waiters are released.
+ *
+ * @note Tracker can potentially be activated before this function returns.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker we will create waiters for.
+ * @param waiter_tail Tail of the list of pre-allocated waiters.
+ * @param waiter_head Head of the list of pre-allocated waiters.
+ */
+static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ struct mali_timeline_waiter *waiter_tail,
+ struct mali_timeline_waiter *waiter_head)
+{
+ int i;
+ u32 tid = _mali_osk_get_tid();
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+#if defined(CONFIG_SYNC)
+ struct sync_fence *sync_fence = NULL;
+#endif /* defined(CONFIG_SYNC) */
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ MALI_DEBUG_ASSERT(NULL != tracker->job);
+
+ /* Create a waiter object for each timeline the fence has a point on, and
+ * insert it into that timeline's sorted list of waiters. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ mali_timeline_point point;
+ struct mali_timeline *timeline;
+ struct mali_timeline_waiter *waiter;
+
+ /* Get point on current timeline from tracker's fence. */
+ point = tracker->fence.points[i];
+
+ if (likely(MALI_TIMELINE_NO_POINT == point)) {
+ /* Fence contains no point on this timeline so we don't need a waiter. */
+ continue;
+ }
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+ MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
+ point, timeline->point_oldest, timeline->point_next));
+ continue;
+ }
+
+ if (likely(mali_timeline_is_point_released(timeline, point))) {
+ /* Tracker representing the point has been released so we don't need a
+ * waiter. */
+ continue;
+ }
+
+ /* The point is on timeline. */
+ MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
+
+ /* Get a new zeroed waiter object. */
+ if (likely(NULL != waiter_tail)) {
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ continue;
+ }
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = point;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Add waiter to timeline. */
+ mali_timeline_insert_waiter(timeline, waiter);
+ }
+#if defined(CONFIG_SYNC)
+ if (-1 != tracker->fence.sync_fd) {
+ int ret;
+ struct mali_timeline_waiter *waiter;
+
+ sync_fence = sync_fence_fdget(tracker->fence.sync_fd);
+ if (unlikely(NULL == sync_fence)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd));
+ goto exit;
+ }
+
+ /* Check if we have a zeroed waiter object available. */
+ if (unlikely(NULL == waiter_tail)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ goto exit;
+ }
+
+ /* Start asynchronous wait that will release waiter when the fence is signaled. */
+ sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+ ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+ if (1 == ret) {
+ /* Fence already signaled, no waiter needed. */
+ goto exit;
+ } else if (0 != ret) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ goto exit;
+ }
+
+ /* Grab new zeroed waiter object. */
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Also store waiter in separate field for easy access by sync callback. */
+ tracker->waiter_sync = waiter;
+
+ /* Store the sync fence in tracker so we can retrieve in abort session, if needed. */
+ tracker->sync_fence = sync_fence;
+
+ sync_fence = NULL;
+ }
+exit:
+#endif /* defined(CONFIG_SYNC) */
+
+ if (NULL != waiter_tail) {
+ mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head);
+ }
+
+ /* Release the initial trigger ref count. */
+ tracker->trigger_ref_count--;
+
+ /* If there were no waiters added to this tracker we activate immediately. */
+ if (0 == tracker->trigger_ref_count) {
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+ if (NULL != sync_fence) {
+ sync_fence_put(sync_fence);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ enum mali_timeline_id timeline_id)
+{
+ int num_waiters = 0;
+ struct mali_timeline_waiter *waiter_tail, *waiter_head;
+ u32 tid = _mali_osk_get_tid();
+ mali_timeline_point point = MALI_TIMELINE_NO_POINT;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id));
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->system = system;
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ num_waiters = mali_timeline_fence_num_waiters(&tracker->fence);
+
+ /* Allocate waiters. */
+ mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Add tracker to timeline. This will allocate a point for the tracker on the timeline. If
+ * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the
+ * point will be MALI_TIMELINE_NO_POINT.
+ *
+ * NOTE: the tracker can fail to be added if the timeline is full. If this happens, the
+ * point will be MALI_TIMELINE_NO_POINT. */
+ MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE);
+ if (likely(timeline_id < MALI_TIMELINE_MAX)) {
+ struct mali_timeline *timeline = system->timelines[timeline_id];
+ mali_timeline_insert_tracker(timeline, tracker);
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ }
+
+ point = tracker->point;
+
+ /* Create waiters for tracker based on supplied fence. Each waiter will increase the
+ * trigger ref count. */
+ mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head);
+ tracker = NULL;
+
+ /* At this point the tracker object might have been freed so we should no longer
+ * access it. */
+
+ /* The tracker will always be activated after calling add_tracker, even if NO_POINT is
+ * returned. */
+ return point;
+}
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *waiter)
+{
+ struct mali_timeline_tracker *tracker;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ tracker = waiter->tracker;
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ /* At this point the waiter has been removed from the timeline's waiter list, but it is
+ * still on the tracker's waiter list. All of the tracker's waiters will be released when
+ * the tracker is activated. */
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = NULL;
+
+ tracker->trigger_ref_count--;
+ if (0 == tracker->trigger_ref_count) {
+ /* This was the last waiter; activate tracker */
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ tracker = NULL;
+ }
+
+ return schedule_mask;
+}
+
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+ enum mali_timeline_id timeline_id)
+{
+ mali_timeline_point point;
+ struct mali_timeline *timeline;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ if (MALI_TIMELINE_MAX <= timeline_id) {
+ return MALI_TIMELINE_NO_POINT;
+ }
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ timeline = system->timelines[timeline_id];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ point = MALI_TIMELINE_NO_POINT;
+ if (timeline->point_oldest != timeline->point_next) {
+ point = timeline->point_next - 1;
+ if (MALI_TIMELINE_NO_POINT == point) point--;
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return point;
+}
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
+{
+ struct mali_timeline *timeline;
+ struct mali_timeline_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT_POINTER(tracker->timeline);
+ timeline = tracker->timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline->system);
+ system = timeline->system;
+
+ if (MALI_TIMELINE_MAX > id) {
+ if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+ return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+ } else {
+ return MALI_FALSE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
+ return MALI_FALSE;
+ }
+}
+
+static const char *timeline_id_to_string(enum mali_timeline_id id)
+{
+ switch (id) {
+ case MALI_TIMELINE_GP:
+ return "GP";
+ case MALI_TIMELINE_PP:
+ return "PP";
+ case MALI_TIMELINE_SOFT:
+ return "SOFT";
+ default:
+ return "NONE";
+ }
+}
+
+static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type)
+{
+ switch (type) {
+ case MALI_TIMELINE_TRACKER_GP:
+ return "GP";
+ case MALI_TIMELINE_TRACKER_PP:
+ return "PP";
+ case MALI_TIMELINE_TRACKER_SOFT:
+ return "SOFT";
+ case MALI_TIMELINE_TRACKER_WAIT:
+ return "WAIT";
+ case MALI_TIMELINE_TRACKER_SYNC:
+ return "SYNC";
+ default:
+ return "INVALID";
+ }
+}
+
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker)
+{
+ struct mali_timeline *timeline = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ timeline = tracker->timeline;
+
+ if (0 != tracker->trigger_ref_count) {
+ return MALI_TIMELINE_TS_WAITING;
+ }
+
+ if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) {
+ return MALI_TIMELINE_TS_ACTIVE;
+ }
+
+ if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) {
+ return MALI_TIMELINE_TS_INIT;
+ }
+
+ return MALI_TIMELINE_TS_FINISH;
+}
+
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx)
+{
+ const char *tracker_state = "IWAF";
+ char state_char = 'I';
+ char tracker_type[32] = {0};
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+ _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+ if (0 != tracker->trigger_ref_count) {
+#if defined(CONFIG_SYNC)
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u, fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+#else
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+ tracker->job);
+#endif /* defined(CONFIG_SYNC) */
+ } else {
+#if defined(CONFIG_SYNC)
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+#else
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->job);
+#endif /* defined(CONFIG_SYNC) */
+ }
+}
+
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx)
+{
+ struct mali_timeline_tracker *tracker = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker = timeline->tracker_tail;
+ while (NULL != tracker) {
+ mali_timeline_debug_print_tracker(tracker, print_ctx);
+ tracker = tracker->timeline_next;
+ }
+}
+
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx)
+{
+ int i;
+ int num_printed = 0;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Print all timelines */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (NULL == timeline->tracker_head) continue;
+
+ _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
+ timeline_id_to_string((enum mali_timeline_id)i));
+
+ mali_timeline_debug_print_timeline(timeline, print_ctx);
+ num_printed++;
+ }
+
+ if (0 == num_printed) {
+ _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n");
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
diff --git a/drivers/gpu/mali/mali/common/mali_timeline.h b/drivers/gpu/mali/mali/common/mali_timeline.h
new file mode 100644
index 00000000..646501eb
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_timeline.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMELINE_H__
+#define __MALI_TIMELINE_H__
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+#include "mali_sync.h"
+#include "mali_scheduler_types.h"
+
+/**
+ * Soft job timeout.
+ *
+ * Soft jobs have to be signaled as complete after activation. Normally this is done by user space,
+ * but in order to guarantee that every soft job is completed, we also have a timer.
+ */
+#define MALI_TIMELINE_TIMEOUT_HZ ((u32) (HZ * 3 / 2)) /* 1500 ms. */
+
+/**
+ * Timeline type.
+ */
+typedef enum mali_timeline_id {
+ MALI_TIMELINE_GP = MALI_UK_TIMELINE_GP, /**< GP job timeline. */
+ MALI_TIMELINE_PP = MALI_UK_TIMELINE_PP, /**< PP job timeline. */
+ MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */
+ MALI_TIMELINE_MAX = MALI_UK_TIMELINE_MAX
+} mali_timeline_id;
+
+/**
+ * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker).
+ */
+#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX
+
+/**
+ * Tracker type.
+ */
+typedef enum mali_timeline_tracker_type {
+ MALI_TIMELINE_TRACKER_GP = 0, /**< Tracker used by GP jobs. */
+ MALI_TIMELINE_TRACKER_PP = 1, /**< Tracker used by PP jobs. */
+ MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */
+ MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */
+ MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */
+ MALI_TIMELINE_TRACKER_MAX = 5,
+} mali_timeline_tracker_type;
+
+/**
+ * Tracker activation error.
+ */
+typedef u32 mali_timeline_activation_error;
+#define MALI_TIMELINE_ACTIVATION_ERROR_NONE 0
+#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT (1<<1)
+#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0)
+
+/**
+ * Type used to represent a point on a timeline.
+ */
+typedef u32 mali_timeline_point;
+
+/**
+ * Used to represent the absence of a point on a timeline.
+ */
+#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0)
+
+/**
+ * The maximum span of points on a timeline. A timeline will be considered full if the difference
+ * between the oldest and newest points is equal to or larger than this value.
+ */
+#define MALI_TIMELINE_MAX_POINT_SPAN 65536
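+
+/* Worked example (illustrative): with point_oldest == 0xFFFFFF00 and
+ * point_next == 0x00000100, the unsigned difference point_next - point_oldest
+ * is 0x200, so the timeline holds 512 points even though the counter has
+ * wrapped past zero. */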
+
+/**
+ * Magic value used to assert on validity of trackers.
+ */
+#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd
+
+struct mali_timeline;
+struct mali_timeline_waiter;
+struct mali_timeline_tracker;
+
+/**
+ * Timeline fence.
+ */
+struct mali_timeline_fence {
+ mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */
+ s32 sync_fd; /**< A file descriptor representing a sync fence, or -1. */
+};
+
+/**
+ * Timeline system.
+ *
+ * The Timeline system has a set of timelines associated with a session.
+ */
+struct mali_timeline_system {
+ struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */
+ struct mali_timeline *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */
+
+ /* Single-linked list of unused waiter objects. Uses the tracker_next field in tracker. */
+ struct mali_timeline_waiter *waiter_empty_list;
+
+ struct mali_session_data *session; /**< Session that owns this system. */
+
+ mali_bool timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */
+
+ _mali_osk_wait_queue_t *wait_queue; /**< Wait queue. */
+
+#if defined(CONFIG_SYNC)
+ struct sync_timeline *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#endif /* defined(CONFIG_SYNC) */
+};
+
+/**
+ * Timeline. Each Timeline system will have MALI_TIMELINE_MAX timelines.
+ */
+struct mali_timeline {
+ mali_timeline_point point_next; /**< The next available point. */
+ mali_timeline_point point_oldest; /**< The oldest point not released. */
+
+ /* Double-linked list of trackers. Sorted in ascending order by tracker->point
+ * with tail pointing to the tracker with the oldest point. */
+ struct mali_timeline_tracker *tracker_head;
+ struct mali_timeline_tracker *tracker_tail;
+
+ /* Double-linked list of waiters. Sorted in ascending order by waiter->point
+ * with tail pointing to the waiter with the oldest point. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+ struct mali_timeline_system *system; /**< Timeline system this timeline belongs to. */
+ enum mali_timeline_id id; /**< Timeline type. */
+
+#if defined(CONFIG_SYNC)
+ struct sync_timeline *sync_tl; /**< Sync timeline that corresponds to this timeline. */
+#endif /* defined(CONFIG_SYNC) */
+
+ /* The following fields are used to time out soft job trackers. */
+ _mali_osk_wq_delayed_work_t *delayed_work;
+ mali_bool timer_active;
+};
+
+/**
+ * Timeline waiter.
+ */
+struct mali_timeline_waiter {
+ mali_timeline_point point; /**< Point on timeline we are waiting for to be released. */
+ struct mali_timeline_tracker *tracker; /**< Tracker that is waiting. */
+
+ struct mali_timeline_waiter *timeline_next; /**< Next waiter on timeline's waiter list. */
+ struct mali_timeline_waiter *timeline_prev; /**< Previous waiter on timeline's waiter list. */
+
+ struct mali_timeline_waiter *tracker_next; /**< Next waiter on tracker's waiter list. */
+};
+
+/**
+ * Timeline tracker.
+ */
+struct mali_timeline_tracker {
+ MALI_DEBUG_CODE(u32 magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */
+
+ mali_timeline_point point; /**< Point on timeline for this tracker */
+
+ struct mali_timeline_tracker *timeline_next; /**< Next tracker on timeline's tracker list */
+ struct mali_timeline_tracker *timeline_prev; /**< Previous tracker on timeline's tracker list */
+
+ u32 trigger_ref_count; /**< When zero tracker will be activated */
+ mali_timeline_activation_error activation_error; /**< Activation error. */
+ struct mali_timeline_fence fence; /**< Fence used to create this tracker */
+
+ /* Single-linked list of waiters. Sorted in order of insertions with
+ * tail pointing to first waiter. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+#if defined(CONFIG_SYNC)
+ /* These are only used if the tracker is waiting on a sync fence. */
+ struct mali_timeline_waiter *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. */
+ struct sync_fence_waiter sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+ struct sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */
+ _mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
+#endif /* defined(CONFIG_SYNC) */
+
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline *timeline; /**< Timeline, or NULL if not on a timeline. */
+ enum mali_timeline_tracker_type type; /**< Type of tracker. */
+ void *job; /**< Owner of tracker. */
+
+ /* The following fields are used to time out soft job trackers. */
+ u32 os_tick_create;
+ u32 os_tick_activate;
+ mali_bool timer_active;
+};
+
+/**
+ * What follows is a set of functions to check the state of a timeline and to determine where on a
+ * timeline a given point is. Most of these checks will translate the timeline so the oldest point
+ * on the timeline is aligned with zero. Remember that all of these calculation are done on
+ * unsigned integers.
+ *
+ * The following example illustrates the three different states a point can be in. The timeline has
+ * been translated to put the oldest point at zero:
+ *
+ *
+ *
+ *                              [ point is in forbidden zone ]
+ *                                         64k wide
+ *                               MALI_TIMELINE_MAX_POINT_SPAN
+ *
+ *   [ point is on timeline    )                             ( point is released ]
+ *
+ *   0--------------------------##############################--------------------2^32 - 1
+ *   ^                          ^
+ *   |                          |
+ *   oldest point on timeline   next point on timeline
+ */
+
+/**
+ * Compare two timeline points
+ *
+ * Returns true if a is after b, false if a is before or equal to b.
+ *
+ * This function ignores MALI_TIMELINE_MAX_POINT_SPAN. Wrapping is supported and
+ * the result will be correct if the points are less than UINT_MAX/2 apart.
+ *
+ * @param a Point on timeline
+ * @param b Point on timeline
+ * @return MALI_TRUE if a is after b
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b)
+{
+ return 0 > ((s32)b) - ((s32)a);
+}
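+
+/* Example (illustrative): mali_timeline_point_after(2, UINT_MAX) returns
+ * MALI_TRUE, since ((s32)UINT_MAX) - ((s32)2) == -3 is negative; point 2 is
+ * after a point taken just before the counter wrapped. */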
+
+/**
+ * Check if a point is on timeline. A point is on a timeline if it is greater than, or equal to,
+ * the oldest point, and less than the next point.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is on timeline, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ return (point - timeline->point_oldest) < (timeline->point_next - timeline->point_oldest);
+}
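+
+/* Example (illustrative): with point_oldest == 0xFFFFFFF0 and point_next == 4,
+ * point 1 is on the timeline, since in unsigned arithmetic
+ * (1 - 0xFFFFFFF0) == 17 is less than (4 - 0xFFFFFFF0) == 20. */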
+
+/**
+ * Check if a point has been released. A point is released if it is older than the oldest point on
+ * the timeline, newer than the next point, and also not in the forbidden zone.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point has been released, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ mali_timeline_point point_normalized;
+ mali_timeline_point next_normalized;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ point_normalized = point - timeline->point_oldest;
+ next_normalized = timeline->point_next - timeline->point_oldest;
+
+ return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN);
+}
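+
+/* Example (illustrative): with point_oldest == 100 and point_next == 110,
+ * point 50 has been released: point_normalized == (u32)(50 - 100) ==
+ * 4294967246, which is larger than next_normalized + 64k == 10 + 65536. */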
+
+/**
+ * Check if a point is valid. A point is valid if it is on the timeline or has been released.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is valid, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return mali_timeline_is_point_on(timeline, point) || mali_timeline_is_point_released(timeline, point);
+}
+
+/**
+ * Check if timeline is empty (has no points on it). A timeline is empty if next == oldest.
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is empty, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return timeline->point_next == timeline->point_oldest;
+}
+
+/**
+ * Check if timeline is full. A valid timeline cannot span more than 64k points (@ref
+ * MALI_TIMELINE_MAX_POINT_SPAN).
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is full, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return MALI_TIMELINE_MAX_POINT_SPAN <= (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Create a new timeline system.
+ *
+ * @param session The session this timeline system will belong to.
+ * @return New timeline system.
+ */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session);
+
+/**
+ * Abort timeline system.
+ *
+ * This will release all pending waiters in the timeline system causing all trackers to be
+ * activated.
+ *
+ * @param system Timeline system to abort all jobs from.
+ */
+void mali_timeline_system_abort(struct mali_timeline_system *system);
+
+/**
+ * Destroy an empty timeline system.
+ *
+ * @note @ref mali_timeline_system_abort() should be called prior to this function.
+ *
+ * @param system Timeline system to destroy.
+ */
+void mali_timeline_system_destroy(struct mali_timeline_system *system);
+
+/**
+ * Stop the soft job timer.
+ *
+ * @param system Timeline system
+ */
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system);
+
+/**
+ * Add a tracker to a timeline system and optionally also on a timeline.
+ *
+ * Once added to the timeline system, the tracker is guaranteed to be activated. The tracker can be
+ * activated before this function returns. Thus, it is also possible that the tracker is released
+ * before this function returns, depending on the tracker type.
+ *
+ * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the
+ * timeline system.
+ *
+ * @param system Timeline system the tracker will be added to.
+ * @param tracker The tracker to be added.
+ * @param timeline_id Id of the timeline the tracker will be added to, or
+ * MALI_TIMELINE_NONE if it should not be added on a timeline.
+ * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline.
+ */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ enum mali_timeline_id timeline_id);
+
+/**
+ * Get latest point on timeline.
+ *
+ * @param system Timeline system.
+ * @param timeline_id Id of timeline to get latest point from.
+ * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty.
+ */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+ enum mali_timeline_id timeline_id);
+
+/**
+ * Initialize tracker.
+ *
+ * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker).
+ *
+ * @param tracker Tracker to initialize.
+ * @param type Type of tracker.
+ * @param fence Fence used to set up dependencies for tracker.
+ * @param job Pointer to job struct this tracker is associated with.
+ */
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job);
+
+/**
+ * Grab trigger ref count on tracker.
+ *
+ * This will prevent tracker from being activated until the trigger ref count reaches zero.
+ *
+ * @note Tracker must have been initialized (@ref mali_timeline_tracker_init).
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker);
+
+/**
+ * Release trigger ref count on tracker.
+ *
+ * If the trigger ref count reaches zero, the tracker will be activated.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error);
+
+/**
+ * Release a tracker from the timeline system.
+ *
+ * This is used to signal that the job being tracked is finished, either due to normal circumstances
+ * (job complete/abort) or due to a timeout.
+ *
+ * We may need to schedule some subsystems after a tracker has been released and the returned
+ * bitmask will tell us if it is necessary. If the return value is non-zero, this value needs to be
+ * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling.
+ *
+ * @note Tracker must have been activated before being released.
+ * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead
+ * to a deadlock.
+ *
+ * @param tracker Tracker being released.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
+
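+/*
+ * Illustrative release sequence (a sketch only; assumes the two-argument
+ * form of mali_scheduler_schedule_from_mask() declared in mali_scheduler.h):
+ *
+ * @code
+ * mali_scheduler_mask mask = mali_timeline_tracker_release(tracker);
+ * if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+ *         mali_scheduler_schedule_from_mask(mask, MALI_FALSE);
+ * }
+ * @endcode
+ */
+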
+/**
+ * Copy data from a UK fence to a Timeline fence.
+ *
+ * @param fence Timeline fence.
+ * @param uk_fence UK fence.
+ */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+
+#if defined(DEBUG)
+#define MALI_TIMELINE_DEBUG_FUNCTIONS
+#endif /* DEBUG */
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/**
+ * Tracker state. Used for debug printing.
+ */
+typedef enum mali_timeline_tracker_state {
+ MALI_TIMELINE_TS_INIT = 0,
+ MALI_TIMELINE_TS_WAITING = 1,
+ MALI_TIMELINE_TS_ACTIVE = 2,
+ MALI_TIMELINE_TS_FINISH = 3,
+} mali_timeline_tracker_state;
+
+/**
+ * Get tracker state.
+ *
+ * @param tracker Tracker to check.
+ * @return State of tracker.
+ */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about tracker.
+ *
+ * @param tracker Tracker to print.
+ */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx);
+
+/**
+ * Print debug information about timeline.
+ *
+ * @param timeline Timeline to print.
+ */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx);
+
+/**
+ * Print debug information about timeline system.
+ *
+ * @param system Timeline system to print.
+ */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx);
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#endif /* __MALI_TIMELINE_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_timeline_fence_wait.c b/drivers/gpu/mali/mali/common/mali_timeline_fence_wait.c
new file mode 100644
index 00000000..1eace170
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_timeline_fence_wait.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline_fence_wait.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+
+/**
+ * Allocate a fence waiter tracker.
+ *
+ * @return New fence waiter if successful, NULL if not.
+ */
+static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void)
+{
+ return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker));
+}
+
+/**
+ * Free fence waiter tracker.
+ *
+ * @param wait Fence wait tracker to free.
+ */
+static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait)
+{
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ _mali_osk_atomic_term(&wait->refcount);
+ _mali_osk_free(wait);
+}
+
+/**
+ * Check if fence wait tracker has been activated. Used as a wait queue condition.
+ *
+ * @param data Fence waiter.
+ * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+
+ wait = (struct mali_timeline_fence_wait_tracker *) data;
+ MALI_DEBUG_ASSERT_POINTER(wait);
+
+ return wait->activated;
+}
+
+/**
+ * Check if fence has been signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Timeline fence.
+ * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ int i;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool ret = MALI_TRUE;
+#if defined(CONFIG_SYNC)
+ struct sync_fence *sync_fence = NULL;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+ mali_timeline_point point;
+
+ point = fence->points[i];
+
+ if (likely(MALI_TIMELINE_NO_POINT == point)) {
+ /* Fence contains no point on this timeline. */
+ continue;
+ }
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+ MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
+ }
+
+ if (!mali_timeline_is_point_released(timeline, point)) {
+ ret = MALI_FALSE;
+ goto exit;
+ }
+ }
+
+#if defined(CONFIG_SYNC)
+ if (-1 != fence->sync_fd) {
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+ if (likely(NULL != sync_fence)) {
+ if (0 == sync_fence->status) {
+ ret = MALI_FALSE;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
+ }
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+exit:
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+ if (NULL != sync_fence) {
+ sync_fence_put(sync_fence);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ return ret;
+}
+
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+ mali_timeline_point point;
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));
+
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
+ return mali_timeline_fence_wait_check_status(system, fence);
+ }
+
+ wait = mali_timeline_fence_wait_tracker_alloc();
+ if (unlikely(NULL == wait)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
+ return MALI_FALSE;
+ }
+
+ wait->activated = MALI_FALSE;
+ wait->system = system;
+
+	/* Initialize refcount to two references. The first reference will be released by this
+ * function after the wait is over. The second reference will be released when the tracker
+ * is activated. */
+ _mali_osk_atomic_init(&wait->refcount, 2);
+
+ /* Add tracker to timeline system, but not to a timeline. */
+ mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
+ point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+ MALI_IGNORE(point);
+
+ /* Wait for the tracker to be activated or time out. */
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
+ } else {
+ _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
+ }
+
+ ret = wait->activated;
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+
+ return ret;
+}
+
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ MALI_DEBUG_ASSERT_POINTER(wait->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
+ wait->activated = MALI_TRUE;
+
+ _mali_osk_wait_queue_wake_up(wait->system->wait_queue);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&wait->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+ MALI_IGNORE(schedule_mask);
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+}
diff --git a/drivers/gpu/mali/mali/common/mali_timeline_fence_wait.h b/drivers/gpu/mali/mali/common/mali_timeline_fence_wait.h
new file mode 100644
index 00000000..393a71d4
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_timeline_fence_wait.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_fence_wait.h
+ *
+ * This file contains functions used to wait until a Timeline fence is signaled.
+ */
+
+#ifndef __MALI_TIMELINE_FENCE_WAIT_H__
+#define __MALI_TIMELINE_FENCE_WAIT_H__
+
+#include "mali_osk.h"
+#include "mali_timeline.h"
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the
+ * function only returns when the fence is signaled.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1)
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return
+ * immediately with the current state of the fence.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0
+
+/**
+ * Fence wait tracker.
+ *
+ * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a
+ * dependency. We will then perform a blocking wait, possibly with a timeout, until the tracker is
+ * activated, which happens when the fence is signaled.
+ */
+struct mali_timeline_fence_wait_tracker {
+ mali_bool activated; /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */
+ _mali_osk_atomic_t refcount; /**< Reference count. */
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Wait for a fence to be signaled, or timeout is reached.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to wait on.
+ * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or
+ * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY.
+ * @return MALI_TRUE if signaled, MALI_FALSE if timed out.
+ */
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout);
+
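+/*
+ * Usage sketch (illustrative only; the 1000 ms timeout and the error code
+ * returned on timeout are examples, not taken from a real call site):
+ *
+ * @code
+ * mali_bool signaled = mali_timeline_fence_wait(system, &fence, 1000);
+ * if (MALI_FALSE == signaled) {
+ *         return _MALI_OSK_ERR_TIMEOUT;
+ * }
+ * @endcode
+ */
+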
+/**
+ * Used by the Timeline system to activate a fence wait tracker.
+ *
+ * @param fence_wait_tracker Fence waiter tracker.
+ */
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker);
+
+#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_timeline_sync_fence.c b/drivers/gpu/mali/mali/common/mali_timeline_sync_fence.c
new file mode 100644
index 00000000..ebfa569b
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_timeline_sync_fence.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline_sync_fence.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Creates a sync fence tracker and a sync fence. Adds sync fence tracker to Timeline system and
+ * returns sync fence. The sync fence will be signaled when the sync fence tracker is activated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return Sync fence that will be signaled when tracker is activated.
+ */
+static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ struct mali_timeline_sync_fence_tracker *sync_fence_tracker;
+ struct sync_fence *sync_fence;
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ /* Allocate sync fence tracker. */
+ sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker));
+ if (NULL == sync_fence_tracker) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n"));
+ return NULL;
+ }
+
+ /* Create sync flag. */
+ MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl);
+ sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point);
+ if (NULL == sync_fence_tracker->flag) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n"));
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Create sync fence from sync flag. */
+ sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag);
+ if (NULL == sync_fence) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n"));
+ mali_sync_flag_put(sync_fence_tracker->flag);
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Setup fence for tracker. */
+ _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+ fence.sync_fd = -1;
+ fence.points[timeline->id] = point;
+
+ /* Finally, add the tracker to Timeline system. */
+ mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker);
+ point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+
+ return sync_fence;
+}
+
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ u32 i;
+ struct sync_fence *sync_fence_acc = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+ struct sync_fence *sync_fence;
+
+ if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue;
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]);
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ /* Merge sync fences. */
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ /* This was the first sync fence created. */
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (-1 != fence->sync_fd) {
+ struct sync_fence *sync_fence;
+
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (NULL == sync_fence_acc) {
+ MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl);
+
+ /* There was nothing to wait on, so return an already signaled fence. */
+
+ sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl);
+ if (NULL == sync_fence_acc) goto error;
+ }
+
+ /* Return file descriptor for the accumulated sync fence. */
+ return mali_sync_fence_fd_alloc(sync_fence_acc);
+
+error:
+ if (NULL != sync_fence_acc) {
+ sync_fence_put(sync_fence_acc);
+ }
+
+ return -1;
+}
+
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n"));
+
+ /* Signal flag and release reference. */
+ mali_sync_flag_signal(sync_fence_tracker->flag, 0);
+ mali_sync_flag_put(sync_fence_tracker->flag);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+
+ _mali_osk_free(sync_fence_tracker);
+}
+
+#endif /* defined(CONFIG_SYNC) */
diff --git a/drivers/gpu/mali/mali/common/mali_timeline_sync_fence.h b/drivers/gpu/mali/mali/common/mali_timeline_sync_fence.h
new file mode 100644
index 00000000..54b92d51
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_timeline_sync_fence.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_sync_fence.h
+ *
+ * This file contains code related to creating sync fences from timeline fences.
+ */
+
+#ifndef __MALI_TIMELINE_SYNC_FENCE_H__
+#define __MALI_TIMELINE_SYNC_FENCE_H__
+
+#include "mali_timeline.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Sync fence tracker.
+ */
+struct mali_timeline_sync_fence_tracker {
+ struct mali_sync_flag *flag; /**< Sync flag used to connect tracker and sync fence. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Create a sync fence that will be signaled when @ref fence is signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to create sync fence from.
+ * @return File descriptor for new sync fence, or -1 on error.
+ */
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence);
+
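+/*
+ * Usage sketch (illustrative only; the error handling shown is an example):
+ *
+ * @code
+ * s32 fd = mali_timeline_sync_fence_create(system, &fence);
+ * if (0 > fd) {
+ *         return _MALI_OSK_ERR_FAULT;
+ * }
+ * @endcode
+ *
+ * The returned file descriptor can be passed back to user space, which can
+ * then wait on it like any other sync fence.
+ */
+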
+/**
+ * Used by the Timeline system to activate a sync fence tracker.
+ *
+ * @param sync_fence_tracker Sync fence tracker.
+ */
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker);
+
+#endif /* defined(CONFIG_SYNC) */
+
+#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_ukk.h b/drivers/gpu/mali/mali/common/mali_ukk.h
new file mode 100644
index 00000000..be07b793
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_ukk.h
@@ -0,0 +1,564 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function call)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions.
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing values into the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side, in which case the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
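+ *
+ * For example (an illustrative sketch; the function shown is just an
+ * example):@code
+ * _mali_osk_errcode_t err;
+ * _mali_uk_get_api_version_s args;
+ * args.ctx = stored_context;
+ * err = _mali_uku_get_api_version( &args );
+ * args.ctx = stored_context;
+ * err = _mali_uku_get_api_version( &args );
+ * @endcode
+ * The handle is copied in again before the second call, because the first
+ * call may have overwritten \c args.ctx.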
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context);
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context);
+
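+/*
+ * Illustrative session lifetime (a sketch of the direct-function-call
+ * flavour of the interface; issue_uk_calls() is a hypothetical stand-in for
+ * any sequence of U/K calls made with the context handle):
+ *
+ * @code
+ * void *context = NULL;
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_open(&context)) {
+ *         issue_uk_calls(context);
+ *         _mali_ukk_close(&context);
+ * }
+ * @endcode
+ */
+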
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args);
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * This function is obsolete, but kept to allow old, incompatible user space
+ * clients to robustly detect the incompatibility.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @brief Grant or deny high priority scheduling for this session.
+ *
+ * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() depend on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args);
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args);
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args);
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args);
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args);
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args);
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem(_mali_uk_unmap_external_mem_s *args);
+
+#if defined(CONFIG_MALI400_UMP)
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args);
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args);
+#endif /* CONFIG_MALI400_UMP */
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned; otherwise, the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs);
+
+/**
+ * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
+ *
+ * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job.
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args);
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_pp_disable_wb_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned; otherwise, the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args);
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args);
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Return the total memory usage
+ *
+ * @param args see _mali_uk_profiling_memory_usage_get_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_memory_usage_get(_mali_uk_profiling_memory_usage_get_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/mali/mali/common/mali_user_settings_db.c b/drivers/gpu/mali/mali/common/mali_user_settings_db.c
new file mode 100644
index 00000000..b5d65216
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_user_settings_db.c
@@ -0,0 +1,147 @@
+/**
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool done = MALI_FALSE;
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+
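+	/* Strategy: allocate one notification object per session while the lock
+	 * is not held, then take the lock. If new sessions appeared in the
+	 * meantime there are too few objects; in that case the lock is dropped,
+	 * the batch is freed and the loop retries with the updated count. */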
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+		/* Pre-allocate the number of notification objects we need right now (might change after the lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+			MALI_PRINT_ERROR(("Failed to notify user space sessions about setting change (alloc failure)\n"));
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
+ sizeof(_mali_uk_settings_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_settings_changed_s *data;
+ data = notobjs[i]->result_buffer;
+
+ data->setting = setting;
+ data->value = value;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+			/* We have allocated enough notification objects for all the sessions at the moment */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool notify = MALI_FALSE;
+
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+		MALI_DEBUG_PRINT_ERROR(("Invalid user setting %u\n", setting));
+ return;
+ }
+
+ if (mali_user_settings[setting] != value) {
+ notify = MALI_TRUE;
+ }
+
+ mali_user_settings[setting] = value;
+
+ if (notify) {
+ mali_user_settings_notify(setting, value);
+ }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+ return 0;
+ }
+
+ return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+ _mali_uk_user_setting_t setting;
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ setting = args->setting;
+
+ if (_MALI_UK_USER_SETTING_MAX > setting) {
+ args->value = mali_user_settings[setting];
+ return _MALI_OSK_ERR_OK;
+ } else {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/mali/mali/common/mali_user_settings_db.h b/drivers/gpu/mali/mali/common/mali_user_settings_db.h
new file mode 100644
index 00000000..824e3e17
--- /dev/null
+++ b/drivers/gpu/mali/mali/common/mali_user_settings_db.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value differs from the previously set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
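+/*
+ * Illustrative usage (a sketch; the chosen setting assumes
+ * _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE is among the values of
+ * _mali_uk_user_setting_t, and enable_sw_events() is a hypothetical
+ * caller-side action):
+ *
+ * @code
+ * mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, 1);
+ * if (0 != mali_get_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE)) {
+ *         enable_sw_events();
+ * }
+ * @endcode
+ */
+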
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */