* [RFC 01/11] soc: mediatek: MediaTek Command Queue (CMDQ) driver
From: Ulrich Hecht @ 2017-09-29 13:09 UTC (permalink / raw)
To: magnus.damm-Re5JQEeQqe8AvxtiuMwx3w,
laurent.pinchart-ryLnwIuWjnjg/C1BVhZhaw,
jacopo-AW8dsiIh9cEdnm+yROfE0A
Cc: Ulrich Hecht, linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r
Ported from the chromeos-3.18 kernel.
Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
drivers/soc/mediatek/Kconfig | 10 +
drivers/soc/mediatek/Makefile | 1 +
drivers/soc/mediatek/mtk-cmdq.c | 2814 +++++++++++++++++++++++++++++++++++++++
include/soc/mediatek/cmdq.h | 211 +++
4 files changed, 3036 insertions(+)
create mode 100644 drivers/soc/mediatek/mtk-cmdq.c
create mode 100644 include/soc/mediatek/cmdq.h
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 609bb34..ef271e0 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -1,6 +1,16 @@
#
# MediaTek SoC drivers
#
+config MTK_CMDQ
+ bool "MediaTek CMDQ Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MTK_INFRACFG
+ help
+ Say yes here to add support for the MediaTek Command Queue (CMDQ)
+ driver. The CMDQ is used to help read/write registers under tight
+ timing constraints, such as updating the display configuration
+ during vblank.
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 12998b0..f7397ef 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
diff --git a/drivers/soc/mediatek/mtk-cmdq.c b/drivers/soc/mediatek/mtk-cmdq.c
new file mode 100644
index 0000000..a8bfb5c
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-cmdq.c
@@ -0,0 +1,2814 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <soc/mediatek/cmdq.h>
+
+/*
+ * Please calculate this value for each platform.
+ * task number = vblank time / ((task cmds * cmd ticks) / GCE freq)
+ */
+#define CMDQ_MAX_TASK_IN_THREAD 70
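+
+/*
+ * A worked example with assumed (not measured) numbers: with a 16.6 ms
+ * vblank, 1000 cmds per task, 4 GCE ticks per cmd and a 200 MHz GCE
+ * clock, task number = 16.6 ms / ((1000 * 4) / 200 MHz)
+ * = 16.6 ms / 20 us = 830; 70 is a conservative bound below that.
+ */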
+
+#define CMDQ_INITIAL_CMD_BLOCK_SIZE PAGE_SIZE
+#define CMDQ_CMD_BUF_POOL_BUF_SIZE PAGE_SIZE
+#define CMDQ_CMD_BUF_POOL_BUF_NUM 140 /* 2 * 70 = 140 */
+#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */
+
+/*
+ * The cmdq_thread cookie value ranges from 0 to CMDQ_MAX_COOKIE_VALUE.
+ * This value is also used as a mask.
+ */
+#define CMDQ_MAX_COOKIE_VALUE 0xffff
+#define CMDQ_COOKIE_MASK CMDQ_MAX_COOKIE_VALUE
+
+#define CMDQ_DEFAULT_TIMEOUT_MS 1000
+#define CMDQ_ACQUIRE_THREAD_TIMEOUT_MS 5000
+#define CMDQ_PREALARM_TIMEOUT_NS 200000000
+
+#define CMDQ_INVALID_THREAD -1
+
+#define CMDQ_DRIVER_DEVICE_NAME "mtk_cmdq"
+
+#define CMDQ_CLK_NAME "gce"
+
+#define CMDQ_CURR_IRQ_STATUS_OFFSET 0x010
+#define CMDQ_CURR_LOADED_THR_OFFSET 0x018
+#define CMDQ_THR_SLOT_CYCLES_OFFSET 0x030
+#define CMDQ_THR_EXEC_CYCLES_OFFSET 0x034
+#define CMDQ_THR_TIMEOUT_TIMER_OFFSET 0x038
+#define CMDQ_BUS_CONTROL_TYPE_OFFSET 0x040
+
+#define CMDQ_SYNC_TOKEN_ID_OFFSET 0x060
+#define CMDQ_SYNC_TOKEN_VAL_OFFSET 0x064
+#define CMDQ_SYNC_TOKEN_UPD_OFFSET 0x068
+
+#define CMDQ_GPR_SHIFT 0x004
+#define CMDQ_GPR_OFFSET 0x080
+
+#define CMDQ_THR_SHIFT 0x080
+#define CMDQ_THR_WARM_RESET_OFFSET 0x100
+#define CMDQ_THR_ENABLE_TASK_OFFSET 0x104
+#define CMDQ_THR_SUSPEND_TASK_OFFSET 0x108
+#define CMDQ_THR_CURR_STATUS_OFFSET 0x10c
+#define CMDQ_THR_IRQ_STATUS_OFFSET 0x110
+#define CMDQ_THR_IRQ_ENABLE_OFFSET 0x114
+#define CMDQ_THR_CURR_ADDR_OFFSET 0x120
+#define CMDQ_THR_END_ADDR_OFFSET 0x124
+#define CMDQ_THR_EXEC_CNT_OFFSET 0x128
+#define CMDQ_THR_WAIT_TOKEN_OFFSET 0x130
+#define CMDQ_THR_CFG_OFFSET 0x140
+#define CMDQ_THR_INST_CYCLES_OFFSET 0x150
+#define CMDQ_THR_INST_THRESX_OFFSET 0x154
+#define CMDQ_THR_STATUS_OFFSET 0x18c
+
+#define CMDQ_SYNC_TOKEN_SET BIT(16)
+#define CMDQ_IRQ_MASK 0xffff
+
+#define CMDQ_THR_ENABLED 0x1
+#define CMDQ_THR_DISABLED 0x0
+#define CMDQ_THR_SUSPEND 0x1
+#define CMDQ_THR_RESUME 0x0
+#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
+#define CMDQ_THR_WARM_RESET BIT(0)
+#define CMDQ_THR_SLOT_CYCLES 0x3200
+#define CMDQ_THR_NO_TIMEOUT 0x0
+#define CMDQ_THR_PRIORITY 3
+#define CMDQ_THR_IRQ_DONE 0x1
+#define CMDQ_THR_IRQ_ERROR 0x12
+#define CMDQ_THR_IRQ_EN 0x13 /* done + error */
+#define CMDQ_THR_IRQ_MASK 0x13
+#define CMDQ_THR_EXECUTING BIT(31)
+#define CMDQ_THR_IS_WAITING BIT(31)
+
+#define CMDQ_ARG_A_MASK 0xffffff
+#define CMDQ_ARG_A_WRITE_MASK 0xffff
+#define CMDQ_ARG_A_SUBSYS_MASK 0x1f0000
+#define CMDQ_SUBSYS_MASK 0x1f
+
+#define CMDQ_OP_CODE_SHIFT 24
+#define CMDQ_SUBSYS_SHIFT 16
+
+#define CMDQ_JUMP_BY_OFFSET 0x10000000
+#define CMDQ_JUMP_BY_PA 0x10000001
+#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
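+
+/*
+ * A JUMP instruction keeps its mode in the high word and its target in
+ * the low word: with CMDQ_JUMP_BY_OFFSET the target is a PC-relative
+ * offset (CMDQ_JUMP_PASS, i.e. one instruction, skips to the next
+ * slot), while CMDQ_JUMP_BY_PA jumps to an absolute physical address,
+ * which is how tasks are chained (see cmdq_task_insert_into_thread()).
+ */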
+
+#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_WAIT BIT(15)
+#define CMDQ_WFE_WAIT_VALUE 0x1
+
+#define CMDQ_MARK_NON_SUSPENDABLE BIT(21) /* 53 - 32 = 21 */
+#define CMDQ_MARK_NOT_ADD_COUNTER BIT(16) /* 48 - 32 = 16 */
+#define CMDQ_MARK_PREFETCH_MARKER BIT(20)
+#define CMDQ_MARK_PREFETCH_MARKER_EN BIT(17)
+#define CMDQ_MARK_PREFETCH_EN BIT(16)
+
+#define CMDQ_EOC_IRQ_EN BIT(0)
+
+#define CMDQ_ENABLE_MASK BIT(0)
+
+#define CMDQ_OP_CODE_MASK 0xff000000
+
+enum cmdq_thread_index {
+ CMDQ_THR_DISP_DSI0 = 0, /* main: dsi0 */
+ CMDQ_THR_DISP_DPI0, /* sub: dpi0 */
+ CMDQ_MAX_THREAD_COUNT, /* max */
+};
+
+struct cmdq_command {
+ struct cmdq *cqctx;
+ /* bit flag of used engines */
+ u64 engine_flag;
+ /*
+ * pointer to the instruction buffer;
+ * must point to a 64-bit-aligned u32 array
+ */
+ u32 *va_base;
+ /* size of instruction buffer, in bytes. */
+ size_t block_size;
+};
+
+enum cmdq_code {
+ /* These are the actual HW op codes. */
+ CMDQ_CODE_MOVE = 0x02,
+ CMDQ_CODE_WRITE = 0x04,
+ CMDQ_CODE_JUMP = 0x10,
+ CMDQ_CODE_WFE = 0x20, /* wait for event (and clear) */
+ CMDQ_CODE_CLEAR_EVENT = 0x21, /* clear event */
+ CMDQ_CODE_EOC = 0x40, /* end of command */
+};
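+
+/*
+ * Each instruction is CMDQ_INST_SIZE (8) bytes, stored as two u32 words:
+ * word 0 carries arg_b, word 1 carries the op code in bits 31..24 and
+ * arg_a in bits 23..0. For example, a WFE on event E is encoded as:
+ * word 1 = (CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) | E
+ * word 0 = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE
+ * (see cmdq_command_is_wfe() and cmdq_core_parse_error()).
+ */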
+
+enum cmdq_task_state {
+ TASK_STATE_IDLE, /* free task */
+ TASK_STATE_BUSY, /* task running on a thread */
+ TASK_STATE_KILLED, /* task process being killed */
+ TASK_STATE_ERROR, /* task execution error */
+ TASK_STATE_DONE, /* task finished */
+ TASK_STATE_WAITING, /* allocated but waiting for available thread */
+};
+
+struct cmdq_cmd_buf {
+ atomic_t used;
+ void *va;
+ dma_addr_t pa;
+};
+
+struct cmdq_task_cb {
+ /* called by isr */
+ cmdq_async_flush_cb isr_cb;
+ void *isr_data;
+ /* called by releasing task */
+ cmdq_async_flush_cb done_cb;
+ void *done_data;
+};
+
+struct cmdq_task {
+ struct cmdq *cqctx;
+ struct list_head list_entry;
+
+ /* state for task life cycle */
+ enum cmdq_task_state task_state;
+ /* virtual address of command buffer */
+ u32 *va_base;
+ /* physical address of command buffer */
+ dma_addr_t mva_base;
+ /* size of allocated command buffer */
+ size_t buf_size;
+ /* points to a cmdq_cmd_buf if this task uses the command buffer pool */
+ struct cmdq_cmd_buf *cmd_buf;
+
+ u64 engine_flag;
+ size_t command_size;
+ u32 num_cmd; /* number of u32 words used (2 per command) */
+ int reorder;
+ /* HW thread ID; CMDQ_INVALID_THREAD if not running */
+ int thread;
+ /* flag of IRQ received */
+ int irq_flag;
+ /* callback functions */
+ struct cmdq_task_cb cb;
+ /* work item when auto release is used */
+ struct work_struct auto_release_work;
+
+ ktime_t submit; /* submit time */
+
+ pid_t caller_pid;
+ char caller_name[TASK_COMM_LEN];
+};
+
+struct cmdq_thread {
+ u32 task_count;
+ u32 wait_cookie;
+ u32 next_cookie;
+ struct cmdq_task *cur_task[CMDQ_MAX_TASK_IN_THREAD];
+};
+
+struct cmdq {
+ struct device *dev;
+
+ void __iomem *base;
+ u32 irq;
+
+ /*
+ * task information
+ * task_cache: struct cmdq_task object cache
+ * task_free_list: unused free tasks
+ * task_active_list: active tasks
+ * task_consume_wait_queue_item: task consumption work item
+ * task_auto_release_wq: auto-release workqueue
+ * task_consume_wq: task consumption workqueue (for queued tasks)
+ */
+ struct kmem_cache *task_cache;
+ struct list_head task_free_list;
+ struct list_head task_active_list;
+ struct list_head task_wait_list;
+ struct work_struct task_consume_wait_queue_item;
+ struct workqueue_struct *task_auto_release_wq;
+ struct workqueue_struct *task_consume_wq;
+
+ struct cmdq_thread thread[CMDQ_MAX_THREAD_COUNT];
+
+ /* mutex, spinlock, flag */
+ struct mutex task_mutex; /* for task list */
+ struct mutex clock_mutex; /* for clock operation */
+ spinlock_t thread_lock; /* for cmdq hardware thread */
+ int thread_usage;
+ spinlock_t exec_lock; /* for exec task */
+
+ /* command buffer pool */
+ struct cmdq_cmd_buf cmd_buf_pool[CMDQ_CMD_BUF_POOL_BUF_NUM];
+
+ /*
+ * notification
+ * wait_queue: for task done
+ * thread_dispatch_queue: for thread acquiring
+ */
+ wait_queue_head_t wait_queue[CMDQ_MAX_THREAD_COUNT];
+ wait_queue_head_t thread_dispatch_queue;
+
+ /* ccf */
+ struct clk *clock;
+};
+
+struct cmdq_event_item {
+ enum cmdq_event event;
+ const char *name;
+};
+
+struct cmdq_subsys {
+ u32 base_addr;
+ int id;
+ const char *name;
+};
+
+static const struct cmdq_event_item cmdq_events[] = {
+ /* Display start of frame (SOF) events */
+ {CMDQ_EVENT_DISP_OVL0_SOF, "CMDQ_EVENT_DISP_OVL0_SOF"},
+ {CMDQ_EVENT_DISP_OVL1_SOF, "CMDQ_EVENT_DISP_OVL1_SOF"},
+ {CMDQ_EVENT_DISP_RDMA0_SOF, "CMDQ_EVENT_DISP_RDMA0_SOF"},
+ {CMDQ_EVENT_DISP_RDMA1_SOF, "CMDQ_EVENT_DISP_RDMA1_SOF"},
+ {CMDQ_EVENT_DISP_RDMA2_SOF, "CMDQ_EVENT_DISP_RDMA2_SOF"},
+ {CMDQ_EVENT_DISP_WDMA0_SOF, "CMDQ_EVENT_DISP_WDMA0_SOF"},
+ {CMDQ_EVENT_DISP_WDMA1_SOF, "CMDQ_EVENT_DISP_WDMA1_SOF"},
+ /* Display end of frame (EOF) events */
+ {CMDQ_EVENT_DISP_OVL0_EOF, "CMDQ_EVENT_DISP_OVL0_EOF"},
+ {CMDQ_EVENT_DISP_OVL1_EOF, "CMDQ_EVENT_DISP_OVL1_EOF"},
+ {CMDQ_EVENT_DISP_RDMA0_EOF, "CMDQ_EVENT_DISP_RDMA0_EOF"},
+ {CMDQ_EVENT_DISP_RDMA1_EOF, "CMDQ_EVENT_DISP_RDMA1_EOF"},
+ {CMDQ_EVENT_DISP_RDMA2_EOF, "CMDQ_EVENT_DISP_RDMA2_EOF"},
+ {CMDQ_EVENT_DISP_WDMA0_EOF, "CMDQ_EVENT_DISP_WDMA0_EOF"},
+ {CMDQ_EVENT_DISP_WDMA1_EOF, "CMDQ_EVENT_DISP_WDMA1_EOF"},
+ /* Mutex end of frame (EOF) events */
+ {CMDQ_EVENT_MUTEX0_STREAM_EOF, "CMDQ_EVENT_MUTEX0_STREAM_EOF"},
+ {CMDQ_EVENT_MUTEX1_STREAM_EOF, "CMDQ_EVENT_MUTEX1_STREAM_EOF"},
+ {CMDQ_EVENT_MUTEX2_STREAM_EOF, "CMDQ_EVENT_MUTEX2_STREAM_EOF"},
+ {CMDQ_EVENT_MUTEX3_STREAM_EOF, "CMDQ_EVENT_MUTEX3_STREAM_EOF"},
+ {CMDQ_EVENT_MUTEX4_STREAM_EOF, "CMDQ_EVENT_MUTEX4_STREAM_EOF"},
+ /* Display underrun events */
+ {CMDQ_EVENT_DISP_RDMA0_UNDERRUN, "CMDQ_EVENT_DISP_RDMA0_UNDERRUN"},
+ {CMDQ_EVENT_DISP_RDMA1_UNDERRUN, "CMDQ_EVENT_DISP_RDMA1_UNDERRUN"},
+ {CMDQ_EVENT_DISP_RDMA2_UNDERRUN, "CMDQ_EVENT_DISP_RDMA2_UNDERRUN"},
+ /* Keep this at the end of HW events */
+ {CMDQ_MAX_HW_EVENT_COUNT, "CMDQ_MAX_HW_EVENT_COUNT"},
+ /* This is the max event value and can also be used as a mask. */
+ {CMDQ_SYNC_TOKEN_MAX, "CMDQ_SYNC_TOKEN_MAX"},
+ /* Invalid event */
+ {CMDQ_SYNC_TOKEN_INVALID, "CMDQ_SYNC_TOKEN_INVALID"},
+};
+
+static const struct cmdq_subsys g_subsys[] = {
+ {0x1400, 1, "MMSYS"},
+ {0x1401, 2, "DISP"},
+ {0x1402, 3, "DISP"},
+};
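+
+/*
+ * A subsys base_addr is bits 31..16 of a register's physical address
+ * (see cmdq_subsys_from_phys_addr()). For example, a hypothetical
+ * register at physical address 0x14000100 has base_addr 0x1400 and
+ * thus subsys id 1 ("MMSYS") in the table above.
+ */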
+
+static const char *cmdq_event_get_name(enum cmdq_event event)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cmdq_events); i++)
+ if (cmdq_events[i].event == event)
+ return cmdq_events[i].name;
+
+ return "CMDQ_EVENT_UNKNOWN";
+}
+
+static void cmdq_event_reset(struct cmdq *cqctx)
+{
+ int i;
+
+ /* set all defined HW events to 0 */
+ for (i = 0; i < ARRAY_SIZE(cmdq_events); i++) {
+ if (cmdq_events[i].event >= CMDQ_MAX_HW_EVENT_COUNT)
+ break;
+ writel(cmdq_events[i].event,
+ cqctx->base + CMDQ_SYNC_TOKEN_UPD_OFFSET);
+ }
+}
+
+static int cmdq_subsys_base_addr_to_id(u32 base_addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(g_subsys); i++) {
+ if (g_subsys[i].base_addr == base_addr)
+ return g_subsys[i].id;
+ }
+
+ return -EFAULT;
+}
+
+static u32 cmdq_subsys_id_to_base_addr(int id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(g_subsys); i++) {
+ if (g_subsys[i].id == id)
+ return g_subsys[i].base_addr;
+ }
+
+ return 0;
+}
+
+static const char *cmdq_subsys_base_addr_to_name(u32 base_addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(g_subsys); i++)
+ if (g_subsys[i].base_addr == base_addr)
+ return g_subsys[i].name;
+
+ return NULL;
+}
+
+static int cmdq_eng_get_thread(u64 flag)
+{
+ if (flag & BIT_ULL(CMDQ_ENG_DISP_DSI0))
+ return CMDQ_THR_DISP_DSI0;
+ else /* CMDQ_ENG_DISP_DPI0 */
+ return CMDQ_THR_DISP_DPI0;
+}
+
+static const char *cmdq_event_get_module(enum cmdq_event event)
+{
+ const char *module;
+
+ switch (event) {
+ case CMDQ_EVENT_DISP_RDMA0_SOF:
+ case CMDQ_EVENT_DISP_RDMA1_SOF:
+ case CMDQ_EVENT_DISP_RDMA2_SOF:
+ case CMDQ_EVENT_DISP_RDMA0_EOF:
+ case CMDQ_EVENT_DISP_RDMA1_EOF:
+ case CMDQ_EVENT_DISP_RDMA2_EOF:
+ case CMDQ_EVENT_DISP_RDMA0_UNDERRUN:
+ case CMDQ_EVENT_DISP_RDMA1_UNDERRUN:
+ case CMDQ_EVENT_DISP_RDMA2_UNDERRUN:
+ module = "DISP_RDMA";
+ break;
+ case CMDQ_EVENT_DISP_WDMA0_SOF:
+ case CMDQ_EVENT_DISP_WDMA1_SOF:
+ case CMDQ_EVENT_DISP_WDMA0_EOF:
+ case CMDQ_EVENT_DISP_WDMA1_EOF:
+ module = "DISP_WDMA";
+ break;
+ case CMDQ_EVENT_DISP_OVL0_SOF:
+ case CMDQ_EVENT_DISP_OVL1_SOF:
+ case CMDQ_EVENT_DISP_OVL0_EOF:
+ case CMDQ_EVENT_DISP_OVL1_EOF:
+ module = "DISP_OVL";
+ break;
+ case CMDQ_EVENT_MUTEX0_STREAM_EOF ... CMDQ_EVENT_MUTEX4_STREAM_EOF:
+ module = "DISP";
+ break;
+ default:
+ module = "CMDQ";
+ break;
+ }
+
+ return module;
+}
+
+static u32 cmdq_thread_get_cookie(struct cmdq *cqctx, int tid)
+{
+ return readl(cqctx->base + CMDQ_THR_EXEC_CNT_OFFSET +
+ CMDQ_THR_SHIFT * tid) & CMDQ_COOKIE_MASK;
+}
+
+static int cmdq_cmd_buf_pool_init(struct cmdq *cqctx)
+{
+ struct device *dev = cqctx->dev;
+ int i;
+ int ret = 0;
+ struct cmdq_cmd_buf *buf;
+
+ for (i = 0; i < ARRAY_SIZE(cqctx->cmd_buf_pool); i++) {
+ buf = &cqctx->cmd_buf_pool[i];
+ buf->va = dma_alloc_coherent(dev, CMDQ_CMD_BUF_POOL_BUF_SIZE,
+ &buf->pa, GFP_KERNEL);
+ if (!buf->va) {
+ dev_err(dev, "failed to alloc cmdq_cmd_buf\n");
+ ret = -ENOMEM;
+ goto fail_alloc;
+ }
+ }
+
+ return 0;
+
+fail_alloc:
+ for (i -= 1; i >= 0; i--) {
+ buf = &cqctx->cmd_buf_pool[i];
+ dma_free_coherent(dev, CMDQ_CMD_BUF_POOL_BUF_SIZE, buf->va,
+ buf->pa);
+ }
+
+ return ret;
+}
+
+static void cmdq_cmd_buf_pool_uninit(struct cmdq *cqctx)
+{
+ struct device *dev = cqctx->dev;
+ int i;
+ struct cmdq_cmd_buf *buf;
+
+ for (i = 0; i < ARRAY_SIZE(cqctx->cmd_buf_pool); i++) {
+ buf = &cqctx->cmd_buf_pool[i];
+ dma_free_coherent(dev, CMDQ_CMD_BUF_POOL_BUF_SIZE, buf->va,
+ buf->pa);
+ if (atomic_read(&buf->used))
+ dev_err(dev,
+ "cmdq_cmd_buf[%d] va:0x%p still in use\n",
+ i, buf->va);
+ }
+}
+
+static struct cmdq_cmd_buf *cmdq_cmd_buf_pool_get(struct cmdq *cqctx)
+{
+ int i;
+ struct cmdq_cmd_buf *buf;
+
+ for (i = 0; i < ARRAY_SIZE(cqctx->cmd_buf_pool); i++) {
+ buf = &cqctx->cmd_buf_pool[i];
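+ /* cmpxchg returns the old value; 0 means we claimed the buffer */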
+ if (!atomic_cmpxchg(&buf->used, 0, 1))
+ return buf;
+ }
+
+ return NULL;
+}
+
+static void cmdq_cmd_buf_pool_put(struct cmdq_cmd_buf *buf)
+{
+ atomic_set(&buf->used, 0);
+}
+
+static int cmdq_subsys_from_phys_addr(struct cmdq *cqctx, u32 cmdq_phys_addr)
+{
+ u32 base_addr = cmdq_phys_addr >> 16;
+ int subsys = cmdq_subsys_base_addr_to_id(base_addr);
+
+ if (subsys < 0)
+ dev_err(cqctx->dev,
+ "unknown subsys: error=%d, phys=0x%08x\n",
+ subsys, cmdq_phys_addr);
+
+ return subsys;
+}
+
+/*
+ * kmem_cache constructor for cmdq_task: it initializes the fields,
+ * but not the command buffer.
+ */
+static void cmdq_task_ctor(void *param)
+{
+ struct cmdq_task *task = param;
+
+ memset(task, 0, sizeof(*task));
+ INIT_LIST_HEAD(&task->list_entry);
+ task->task_state = TASK_STATE_IDLE;
+ task->thread = CMDQ_INVALID_THREAD;
+}
+
+static void cmdq_task_free_command_buffer(struct cmdq_task *task)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct device *dev = cqctx->dev;
+
+ if (!task->va_base)
+ return;
+
+ if (task->cmd_buf)
+ cmdq_cmd_buf_pool_put(task->cmd_buf);
+ else
+ dma_free_coherent(dev, task->buf_size, task->va_base,
+ task->mva_base);
+
+ task->va_base = NULL;
+ task->mva_base = 0;
+ task->buf_size = 0;
+ task->command_size = 0;
+ task->num_cmd = 0;
+ task->cmd_buf = NULL;
+}
+
+/*
+ * Ensure the command buffer in the given cmdq_task is at least @size bytes.
+ * Existing buffer data will be copied to the new buffer.
+ * The buffer is guaranteed to be physically contiguous.
+ * Returns -ENOMEM if a new buffer cannot be allocated.
+ */
+static int cmdq_task_realloc_command_buffer(struct cmdq_task *task, size_t size)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct device *dev = cqctx->dev;
+ void *new_buf = NULL;
+ dma_addr_t new_mva_base;
+ size_t cmd_size;
+ u32 num_cmd;
+ struct cmdq_cmd_buf *cmd_buf = NULL;
+
+ if (task->va_base && task->buf_size >= size)
+ return 0;
+
+ /* try command pool first */
+ if (size <= CMDQ_CMD_BUF_POOL_BUF_SIZE) {
+ cmd_buf = cmdq_cmd_buf_pool_get(cqctx);
+ if (cmd_buf) {
+ new_buf = cmd_buf->va;
+ new_mva_base = cmd_buf->pa;
+ memset(new_buf, 0, CMDQ_CMD_BUF_POOL_BUF_SIZE);
+ }
+ }
+
+ if (!new_buf) {
+ new_buf = dma_alloc_coherent(dev, size, &new_mva_base,
+ GFP_KERNEL);
+ if (!new_buf) {
+ dev_err(dev, "realloc cmd buffer of size %zu failed\n",
+ size);
+ return -ENOMEM;
+ }
+ }
+
+ /* copy and release old buffer */
+ if (task->va_base)
+ memcpy(new_buf, task->va_base, task->buf_size);
+
+ /*
+ * we should keep track of num_cmd and cmd_size
+ * since they are cleared in free command buffer
+ */
+ num_cmd = task->num_cmd;
+ cmd_size = task->command_size;
+ cmdq_task_free_command_buffer(task);
+
+ /* attach the new buffer */
+ task->va_base = new_buf;
+ task->mva_base = new_mva_base;
+ task->buf_size = cmd_buf ? CMDQ_CMD_BUF_POOL_BUF_SIZE : size;
+ task->num_cmd = num_cmd;
+ task->command_size = cmd_size;
+ task->cmd_buf = cmd_buf;
+
+ return 0;
+}
+
+/* allocate and initialize struct cmdq_task and its command buffer */
+static struct cmdq_task *cmdq_task_create(struct cmdq *cqctx)
+{
+ struct device *dev = cqctx->dev;
+ struct cmdq_task *task;
+ int status;
+
+ task = kmem_cache_alloc(cqctx->task_cache, GFP_KERNEL);
+ if (!task)
+ return NULL;
+
+ task->cqctx = cqctx;
+ status = cmdq_task_realloc_command_buffer(
+ task, CMDQ_INITIAL_CMD_BLOCK_SIZE);
+ if (status < 0) {
+ dev_err(dev, "allocate command buffer failed\n");
+ kmem_cache_free(cqctx->task_cache, task);
+ return NULL;
+ }
+ return task;
+}
+
+static int cmdq_dev_init(struct platform_device *pdev, struct cmdq *cqctx)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cqctx->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cqctx->base)) {
+ dev_err(dev, "failed to ioremap gce\n");
+ return PTR_ERR(cqctx->base);
+ }
+
+ cqctx->irq = irq_of_parse_and_map(node, 0);
+ if (!cqctx->irq) {
+ dev_err(dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
+ dev, cqctx->base, cqctx->irq);
+ return 0;
+}
+
+static void cmdq_task_release_unlocked(struct cmdq_task *task)
+{
+ struct cmdq *cqctx = task->cqctx;
+
+ /* The caller must hold cqctx->task_mutex. */
+ lockdep_assert_held(&cqctx->task_mutex);
+
+ task->task_state = TASK_STATE_IDLE;
+ task->thread = CMDQ_INVALID_THREAD;
+
+ cmdq_task_free_command_buffer(task);
+
+ /*
+ * move from active/waiting list to free list
+ * TODO: shrink free list
+ */
+ list_move_tail(&task->list_entry, &cqctx->task_free_list);
+}
+
+static void cmdq_task_release_internal(struct cmdq_task *task)
+{
+ struct cmdq *cqctx = task->cqctx;
+
+ mutex_lock(&cqctx->task_mutex);
+ cmdq_task_release_unlocked(task);
+ mutex_unlock(&cqctx->task_mutex);
+}
+
+static struct cmdq_task *cmdq_core_find_free_task(struct cmdq *cqctx)
+{
+ struct cmdq_task *task;
+
+ mutex_lock(&cqctx->task_mutex);
+
+ /*
+ * Pick from free list first;
+ * create one if there is no free entry.
+ */
+ if (list_empty(&cqctx->task_free_list)) {
+ task = cmdq_task_create(cqctx);
+ } else {
+ task = list_first_entry(&cqctx->task_free_list,
+ struct cmdq_task, list_entry);
+ /* remove from free list */
+ list_del_init(&task->list_entry);
+ }
+
+ mutex_unlock(&cqctx->task_mutex);
+
+ return task;
+}
+
+/* After dropping an error task, reorder the remaining valid tasks. */
+static void cmdq_thread_reorder_task_array(struct cmdq_thread *thread,
+ int prev_id)
+{
+ int i, j;
+ int next_id, search_id;
+ int reorder_count = 0;
+ struct cmdq_task *task;
+
+ next_id = prev_id + 1;
+ for (i = 1; i < (CMDQ_MAX_TASK_IN_THREAD - 1); i++, next_id++) {
+ if (next_id >= CMDQ_MAX_TASK_IN_THREAD)
+ next_id = 0;
+
+ if (thread->cur_task[next_id])
+ break;
+
+ search_id = next_id + 1;
+ for (j = (i + 1); j < CMDQ_MAX_TASK_IN_THREAD;
+ j++, search_id++) {
+ if (search_id >= CMDQ_MAX_TASK_IN_THREAD)
+ search_id = 0;
+
+ if (thread->cur_task[search_id]) {
+ thread->cur_task[next_id] =
+ thread->cur_task[search_id];
+ thread->cur_task[search_id] = NULL;
+ if ((j - i) > reorder_count)
+ reorder_count = j - i;
+
+ break;
+ }
+ }
+
+ task = thread->cur_task[next_id];
+ if ((task->va_base[task->num_cmd - 1] == CMDQ_JUMP_BY_OFFSET) &&
+ (task->va_base[task->num_cmd - 2] == CMDQ_JUMP_PASS)) {
+ /* We reached the last task */
+ break;
+ }
+ }
+
+ thread->next_cookie -= reorder_count;
+}
+
+static int cmdq_core_sync_command(struct cmdq_task *task,
+ struct cmdq_command *cmd_desc)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct device *dev = cqctx->dev;
+ int status;
+ size_t size;
+
+ size = task->command_size + CMDQ_INST_SIZE;
+ status = cmdq_task_realloc_command_buffer(task, size);
+ if (status < 0) {
+ dev_err(dev, "failed to realloc command buffer\n");
+ dev_err(dev, "task=0x%p, request size=%zu\n", task, size);
+ return status;
+ }
+
+ /* copy the commands to our DMA buffer */
+ memcpy(task->va_base, cmd_desc->va_base, cmd_desc->block_size);
+
+ /* re-adjust num_cmd according to command_size */
+ task->num_cmd = task->command_size / sizeof(task->va_base[0]);
+
+ return 0;
+}
+
+static struct cmdq_task *cmdq_core_acquire_task(struct cmdq_command *cmd_desc,
+ struct cmdq_task_cb *cb)
+{
+ struct cmdq *cqctx = cmd_desc->cqctx;
+ struct device *dev = cqctx->dev;
+ struct cmdq_task *task;
+
+ task = cmdq_core_find_free_task(cqctx);
+ if (!task) {
+ dev_err(dev, "can't acquire task info\n");
+ return NULL;
+ }
+
+ /* initialize field values */
+ task->engine_flag = cmd_desc->engine_flag;
+ task->task_state = TASK_STATE_WAITING;
+ task->reorder = 0;
+ task->thread = CMDQ_INVALID_THREAD;
+ task->irq_flag = 0x0;
+ if (cb)
+ task->cb = *cb;
+ else
+ memset(&task->cb, 0, sizeof(task->cb));
+ task->command_size = cmd_desc->block_size;
+
+ /* store caller info for debug */
+ if (current) {
+ task->caller_pid = current->pid;
+ memcpy(task->caller_name, current->comm, sizeof(current->comm));
+ }
+
+ if (cmdq_core_sync_command(task, cmd_desc) < 0) {
+ dev_err(dev, "fail to sync command\n");
+ cmdq_task_release_internal(task);
+ return NULL;
+ }
+
+ /* insert into waiting list to process; task is known valid here */
+ task->submit = ktime_get();
+ mutex_lock(&cqctx->task_mutex);
+ list_add_tail(&task->list_entry, &cqctx->task_wait_list);
+ mutex_unlock(&cqctx->task_mutex);
+
+ return task;
+}
+
+static int cmdq_clk_enable(struct cmdq *cqctx)
+{
+ struct device *dev = cqctx->dev;
+ int ret = 0;
+
+ if (cqctx->thread_usage == 0) {
+ ret = clk_prepare_enable(cqctx->clock);
+ if (ret) {
+ dev_err(dev, "prepare and enable clk:%s fail\n",
+ CMDQ_CLK_NAME);
+ return ret;
+ }
+ cmdq_event_reset(cqctx);
+ }
+ cqctx->thread_usage++;
+
+ return ret;
+}
+
+static void cmdq_clk_disable(struct cmdq *cqctx)
+{
+ cqctx->thread_usage--;
+ if (cqctx->thread_usage <= 0)
+ clk_disable_unprepare(cqctx->clock);
+}
+
+static int cmdq_core_find_free_thread(struct cmdq *cqctx, int tid)
+{
+ struct cmdq_thread *thread = cqctx->thread;
+ u32 next_cookie;
+
+ /*
+ * make sure the found thread has enough space for the task;
+ * cmdq_thread->cur_task[] has a fixed size.
+ */
+ if (thread[tid].task_count >= CMDQ_MAX_TASK_IN_THREAD) {
+ dev_warn(cqctx->dev, "thread(%d) task count = %d\n",
+ tid, thread[tid].task_count);
+ return CMDQ_INVALID_THREAD;
+ }
+
+ next_cookie = thread[tid].next_cookie % CMDQ_MAX_TASK_IN_THREAD;
+ if (thread[tid].cur_task[next_cookie]) {
+ dev_warn(cqctx->dev, "thread(%d) next cookie = %d\n",
+ tid, next_cookie);
+ return CMDQ_INVALID_THREAD;
+ }
+
+ return tid;
+}
+
+static struct cmdq_thread *cmdq_core_acquire_thread(struct cmdq *cqctx,
+ int candidate_tid)
+{
+ int tid;
+
+ tid = cmdq_core_find_free_thread(cqctx, candidate_tid);
+ if (tid != CMDQ_INVALID_THREAD) {
+ mutex_lock(&cqctx->clock_mutex);
+ cmdq_clk_enable(cqctx);
+ mutex_unlock(&cqctx->clock_mutex);
+ return &cqctx->thread[tid];
+ }
+ return NULL;
+}
+
+static void cmdq_core_release_thread(struct cmdq *cqctx, int tid)
+{
+ if (WARN_ON(tid == CMDQ_INVALID_THREAD))
+ return;
+
+ mutex_lock(&cqctx->clock_mutex);
+ cmdq_clk_disable(cqctx);
+ mutex_unlock(&cqctx->clock_mutex);
+}
+
+static void cmdq_task_remove_thread(struct cmdq_task *task)
+{
+ int tid = task->thread;
+
+ task->thread = CMDQ_INVALID_THREAD;
+ cmdq_core_release_thread(task->cqctx, tid);
+}
+
+static int cmdq_thread_suspend(struct cmdq *cqctx, int tid)
+{
+ struct device *dev = cqctx->dev;
+ void __iomem *gce_base = cqctx->base;
+ u32 enabled;
+ u32 status;
+
+ /* write suspend bit */
+ writel(CMDQ_THR_SUSPEND,
+ gce_base + CMDQ_THR_SUSPEND_TASK_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ /* If the thread is already disabled, treat the suspend as successful. */
+ enabled = readl(gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ if (!(enabled & CMDQ_THR_ENABLED))
+ return 0;
+
+ /* poll suspended status */
+ if (readl_poll_timeout_atomic(gce_base +
+ CMDQ_THR_CURR_STATUS_OFFSET +
+ CMDQ_THR_SHIFT * tid,
+ status,
+ status & CMDQ_THR_STATUS_SUSPENDED,
+ 0, 10)) {
+ dev_err(dev, "Suspend HW thread %d failed\n", tid);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_resume(struct cmdq *cqctx, int tid)
+{
+ void __iomem *gce_base = cqctx->base;
+
+ writel(CMDQ_THR_RESUME,
+ gce_base + CMDQ_THR_SUSPEND_TASK_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+}
+
+static int cmdq_thread_reset(struct cmdq *cqctx, int tid)
+{
+ struct device *dev = cqctx->dev;
+ void __iomem *gce_base = cqctx->base;
+ u32 warm_reset;
+
+ writel(CMDQ_THR_WARM_RESET,
+ gce_base + CMDQ_THR_WARM_RESET_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ if (readl_poll_timeout_atomic(gce_base + CMDQ_THR_WARM_RESET_OFFSET +
+ CMDQ_THR_SHIFT * tid,
+ warm_reset,
+ !(warm_reset & CMDQ_THR_WARM_RESET),
+ 0, 10)) {
+ dev_err(dev, "Reset HW thread %d failed\n", tid);
+ return -EFAULT;
+ }
+
+ writel(CMDQ_THR_SLOT_CYCLES, gce_base + CMDQ_THR_SLOT_CYCLES_OFFSET);
+ return 0;
+}
+
+static int cmdq_thread_disable(struct cmdq *cqctx, int tid)
+{
+ void __iomem *gce_base = cqctx->base;
+
+ cmdq_thread_reset(cqctx, tid);
+ writel(CMDQ_THR_DISABLED,
+ gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ return 0;
+}
+
+static u32 *cmdq_task_get_pc_and_inst(const struct cmdq_task *task, int tid,
+ u32 insts[2])
+{
+ struct cmdq *cqctx;
+ void __iomem *gce_base;
+ unsigned long pc_pa;
+ u8 *pc_va;
+ u8 *cmd_end;
+
+ memset(insts, 0, sizeof(u32) * 2);
+
+ if (!task || !task->va_base || tid == CMDQ_INVALID_THREAD) {
+ /* task may be NULL here, so don't dereference it */
+ pr_err("cmdq get pc failed since invalid param, task 0x%p, thread:%d\n",
+ task, tid);
+ return NULL;
+ }
+
+ cqctx = task->cqctx;
+ gce_base = cqctx->base;
+
+ pc_pa = (unsigned long)readl(gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ pc_va = (u8 *)task->va_base + (pc_pa - task->mva_base);
+ cmd_end = (u8 *)(task->va_base + task->num_cmd - 1);
+
+ if (((u8 *)task->va_base <= pc_va) && (pc_va <= cmd_end)) {
+ if (pc_va < cmd_end) {
+ /* get arg_a and arg_b */
+ insts[0] = readl(pc_va);
+ insts[1] = readl(pc_va + 4);
+ } else {
+ /* get arg_a and arg_b of previous cmd */
+ insts[0] = readl(pc_va - 8);
+ insts[1] = readl(pc_va - 4);
+ }
+ } else {
+ return NULL;
+ }
+
+ return (u32 *)pc_va;
+}
+
+static const char *cmdq_core_parse_module_from_subsys(u32 arg_a)
+{
+ int id = (arg_a & CMDQ_ARG_A_SUBSYS_MASK) >> CMDQ_SUBSYS_SHIFT;
+ u32 base_addr = cmdq_subsys_id_to_base_addr(id);
+ const char *module = cmdq_subsys_base_addr_to_name(base_addr);
+
+ return module ? module : "CMDQ";
+}
+
+static const char *cmdq_core_parse_op(u32 op_code)
+{
+ switch (op_code) {
+ case CMDQ_CODE_WRITE:
+ return "WRIT";
+ case CMDQ_CODE_WFE:
+ return "SYNC";
+ case CMDQ_CODE_MOVE:
+ return "MASK";
+ case CMDQ_CODE_JUMP:
+ return "JUMP";
+ case CMDQ_CODE_EOC:
+ return "MARK";
+ }
+ return NULL;
+}
+
+static void cmdq_core_parse_error(struct cmdq_task *task, int tid,
+ const char **module_name, int *flag,
+ u32 *inst_a, u32 *inst_b)
+{
+ int irq_flag = task->irq_flag;
+ u32 insts[2] = { 0 };
+ const char *module;
+
+ /*
+ * In other cases, use the instruction to judge,
+ * because the engine flags are not sufficient.
+ */
+ if (cmdq_task_get_pc_and_inst(task, tid, insts)) {
+ u32 op, arg_a, arg_b;
+
+ op = insts[1] >> CMDQ_OP_CODE_SHIFT;
+ arg_a = insts[1] & CMDQ_ARG_A_MASK;
+ arg_b = insts[0];
+
+ switch (op) {
+ case CMDQ_CODE_WRITE:
+ module = cmdq_core_parse_module_from_subsys(arg_a);
+ break;
+ case CMDQ_CODE_WFE:
+ /* arg_a is the event id */
+ module = cmdq_event_get_module((enum cmdq_event)arg_a);
+ break;
+ case CMDQ_CODE_MOVE:
+ case CMDQ_CODE_JUMP:
+ case CMDQ_CODE_EOC:
+ default:
+ module = "CMDQ";
+ break;
+ }
+ } else {
+ module = "CMDQ";
+ }
+
+ /* fill output parameter */
+ *module_name = module;
+ *flag = irq_flag;
+ *inst_a = insts[1];
+ *inst_b = insts[0];
+}
+
+static void cmdq_thread_insert_task_by_cookie(struct cmdq_thread *thread,
+ struct cmdq_task *task,
+ int cookie)
+{
+ thread->wait_cookie = cookie;
+ thread->next_cookie = cookie + 1;
+ if (thread->next_cookie > CMDQ_MAX_COOKIE_VALUE)
+ thread->next_cookie = 0;
+
+ /* first task, so set to 1 */
+ thread->task_count = 1;
+
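+ /*
+ * Example: with CMDQ_MAX_TASK_IN_THREAD = 70, cookie 71 maps to
+ * slot cur_task[71 % 70] = cur_task[1]; slots are reused as the
+ * cookie keeps increasing and wraps around the ring.
+ */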
+ thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD] = task;
+}
+
+static int cmdq_thread_remove_task_by_index(struct cmdq_thread *thread,
+ int index,
+ enum cmdq_task_state new_state)
+{
+ struct cmdq_task *task;
+ struct device *dev;
+
+ task = thread->cur_task[index];
+ if (!task) {
+ pr_err("%s: remove fail, task:%d on thread:0x%p is NULL\n",
+ __func__, index, thread);
+ return -EINVAL;
+ }
+ dev = task->cqctx->dev;
+
+ /*
+ * note timing to switch a task to done_status(_ERROR, _KILLED, _DONE)
+ * is aligned with thread's taskcount change
+ * check task status to prevent double clean-up thread's taskcount
+ */
+ if (task->task_state != TASK_STATE_BUSY) {
+ dev_err(dev, "remove task failed\n");
+ dev_err(dev, "state:%d. thread:0x%p, task:%d, new_state:%d\n",
+ task->task_state, thread, index, new_state);
+ return -EINVAL;
+ }
+
+ if (thread->task_count == 0) {
+ dev_err(dev, "no task to remove\n");
+ dev_err(dev, "thread:%d, index:%d\n", task->thread, index);
+ return -EINVAL;
+ }
+
+ task->task_state = new_state;
+ thread->cur_task[index] = NULL;
+ thread->task_count--;
+
+ return 0;
+}
+
+static int cmdq_thread_force_remove_task(struct cmdq_task *task, int tid)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct cmdq_thread *thread = &cqctx->thread[tid];
+ void __iomem *gce_base = cqctx->base;
+ int status;
+ int cookie;
+ struct cmdq_task *exec_task;
+
+ status = cmdq_thread_suspend(cqctx, tid);
+
+ writel(CMDQ_THR_NO_TIMEOUT,
+ gce_base + CMDQ_THR_INST_CYCLES_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ /* The cookie of the task currently being processed */
+ cookie = cmdq_thread_get_cookie(cqctx, tid) + 1;
+
+ exec_task = thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD];
+ if (exec_task && exec_task == task) {
+ dma_addr_t eoc_pa = task->mva_base + task->command_size - 16;
+
+ /* The task is executed now, set the PC to EOC for bypass */
+ writel(eoc_pa,
+ gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD] = NULL;
+ task->task_state = TASK_STATE_KILLED;
+ } else {
+ int i, j;
+
+ j = thread->task_count;
+ for (i = cookie; j > 0; j--, i++) {
+ i %= CMDQ_MAX_TASK_IN_THREAD;
+
+ exec_task = thread->cur_task[i];
+ if (!exec_task)
+ continue;
+
+ if ((exec_task->va_base[exec_task->num_cmd - 1] ==
+ CMDQ_JUMP_BY_OFFSET) &&
+ (exec_task->va_base[exec_task->num_cmd - 2] ==
+ CMDQ_JUMP_PASS)) {
+ /* reached the last task */
+ break;
+ }
+
+ if (exec_task->va_base[exec_task->num_cmd - 2] ==
+ task->mva_base) {
+ /* fake EOC command */
+ exec_task->va_base[exec_task->num_cmd - 2] =
+ CMDQ_EOC_IRQ_EN;
+ exec_task->va_base[exec_task->num_cmd - 1] =
+ CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT;
+
+ /* bypass the task */
+ exec_task->va_base[exec_task->num_cmd] =
+ task->va_base[task->num_cmd - 2];
+ exec_task->va_base[exec_task->num_cmd + 1] =
+ task->va_base[task->num_cmd - 1];
+
+ i = (i + 1) % CMDQ_MAX_TASK_IN_THREAD;
+
+ thread->cur_task[i] = NULL;
+ task->task_state = TASK_STATE_KILLED;
+ status = 0;
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+static struct cmdq_task *cmdq_thread_search_task_by_pc(
+ const struct cmdq_thread *thread, u32 pc)
+{
+ struct cmdq_task *task;
+ int i;
+
+ for (i = 0; i < CMDQ_MAX_TASK_IN_THREAD; i++) {
+ task = thread->cur_task[i];
+ if (task &&
+ pc >= task->mva_base &&
+ pc <= task->mva_base + task->command_size)
+ break;
+ }
+
+ return task;
+}
+
+/*
+ * Re-fetch thread's command buffer
+ * Use Case:
+ * If SW modifies the command buffer content after it has handed the
+ * commands to GCE, SW should notify GCE to re-fetch the commands in
+ * order to prevent inconsistent command buffer content between DRAM
+ * and GCE's SRAM.
+ */
+static void cmdq_core_invalidate_hw_fetched_buffer(struct cmdq *cqctx,
+ int tid)
+{
+ void __iomem *pc_va;
+ u32 pc;
+
+ /*
+ * Writing the HW thread PC makes GCE (the CMDQ HW) give up the
+ * already-fetched command buffer and fetch the commands from DRAM
+ * into its SRAM again.
+ */
+ pc_va = cqctx->base + CMDQ_THR_CURR_ADDR_OFFSET + CMDQ_THR_SHIFT * tid;
+ pc = readl(pc_va);
+ writel(pc, pc_va);
+}
+
+static int cmdq_task_insert_into_thread(struct cmdq_task *task,
+ int tid, int loop)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct device *dev = cqctx->dev;
+ struct cmdq_thread *thread = &cqctx->thread[tid];
+ struct cmdq_task *prev_task;
+ int index, prev;
+
+ /* find previous task and then link this task behind it */
+
+ index = thread->next_cookie % CMDQ_MAX_TASK_IN_THREAD;
+ prev = (index + CMDQ_MAX_TASK_IN_THREAD - 1) % CMDQ_MAX_TASK_IN_THREAD;
+
+ prev_task = thread->cur_task[prev];
+
+ /* the job may have been killed; search for a new one */
+ for (; !prev_task && loop > 1; loop--) {
+ dev_err(dev,
+ "prev_task is NULL, prev:%d, loop:%d, index:%d\n",
+ prev, loop, index);
+
+ prev--;
+ if (prev < 0)
+ prev = CMDQ_MAX_TASK_IN_THREAD - 1;
+
+ prev_task = thread->cur_task[prev];
+ }
+
+ if (!prev_task) {
+ dev_err(dev,
+ "invalid prev_task index:%d, loop:%d\n",
+ index, loop);
+ return -EFAULT;
+ }
+
+ /* insert this task */
+ thread->cur_task[index] = task;
+ /* let previous task jump to this new task */
+ prev_task->va_base[prev_task->num_cmd - 1] = CMDQ_JUMP_BY_PA;
+ prev_task->va_base[prev_task->num_cmd - 2] = task->mva_base;
+
+ /* re-fetch command buffer again. */
+ cmdq_core_invalidate_hw_fetched_buffer(cqctx, tid);
+
+ return 0;
+}
+
+static bool cmdq_command_is_wfe(u32 *cmd)
+{
+ u32 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+ u32 wfe_op = CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT;
+
+ return (cmd[0] == wfe_option && (cmd[1] & CMDQ_OP_CODE_MASK) == wfe_op);
+}
+
+/* we assume tasks in the same display thread wait on the same event. */
+static void cmdq_task_remove_wfe(struct cmdq_task *task)
+{
+ u32 *base = task->va_base;
+ int i;
+
+ /*
+ * Find all WFE commands in the task command buffer
+ * and replace them with JUMP_PASS.
+ */
+ for (i = 0; i < task->num_cmd; i += 2) {
+ if (cmdq_command_is_wfe(&base[i])) {
+ base[i] = CMDQ_JUMP_PASS;
+ base[i + 1] = CMDQ_JUMP_BY_OFFSET;
+ }
+ }
+}
+
+static bool cmdq_thread_is_in_wfe(struct cmdq *cqctx, int tid)
+{
+ return readl(cqctx->base + CMDQ_THR_WAIT_TOKEN_OFFSET +
+ CMDQ_THR_SHIFT * tid) & CMDQ_THR_IS_WAITING;
+}
+
+static void cmdq_thread_wait_end(struct cmdq *cqctx, int tid,
+ unsigned long end_pa)
+{
+ void __iomem *gce_base = cqctx->base;
+ unsigned long curr_pa;
+
+ if (readl_poll_timeout_atomic(
+ gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid,
+ curr_pa, curr_pa == end_pa, 1, 20)) {
+ dev_err(cqctx->dev, "GCE thread(%d) cannot run to end.\n", tid);
+ }
+}
+
+static int cmdq_task_exec_async_impl(struct cmdq_task *task, int tid)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct device *dev = cqctx->dev;
+ void __iomem *gce_base = cqctx->base;
+ int status;
+ struct cmdq_thread *thread;
+ unsigned long flags;
+ int loop;
+ int minimum;
+ int cookie;
+
+ status = 0;
+ thread = &cqctx->thread[tid];
+
+ spin_lock_irqsave(&cqctx->exec_lock, flags);
+
+ /* update task's thread info */
+ task->thread = tid;
+ task->irq_flag = 0;
+ task->task_state = TASK_STATE_BUSY;
+
+ /* case 1. first task for this thread */
+ if (thread->task_count <= 0) {
+ if (cmdq_thread_reset(cqctx, tid) < 0) {
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+ return -EFAULT;
+ }
+
+ writel(CMDQ_THR_NO_TIMEOUT,
+ gce_base + CMDQ_THR_INST_CYCLES_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ writel(task->mva_base,
+ gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ writel(task->mva_base + task->command_size,
+ gce_base + CMDQ_THR_END_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ writel(CMDQ_THR_PRIORITY,
+ gce_base + CMDQ_THR_CFG_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ writel(CMDQ_THR_IRQ_EN,
+ gce_base + CMDQ_THR_IRQ_ENABLE_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ minimum = cmdq_thread_get_cookie(cqctx, tid);
+ cmdq_thread_insert_task_by_cookie(
+ thread, task, (minimum + 1));
+
+ /* enable HW thread */
+ writel(CMDQ_THR_ENABLED,
+ gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ } else {
+ unsigned long curr_pa, end_pa;
+
+ status = cmdq_thread_suspend(cqctx, tid);
+ if (status < 0) {
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+ return status;
+ }
+
+ writel(CMDQ_THR_NO_TIMEOUT,
+ gce_base + CMDQ_THR_INST_CYCLES_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ cookie = thread->next_cookie;
+
+ curr_pa = (unsigned long)readl(gce_base +
+ CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ end_pa = (unsigned long)readl(gce_base +
+ CMDQ_THR_END_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ /*
+ * case 2. If already exited WFE, wait for current task to end
+ * and then jump directly to new task.
+ */
+ if (!cmdq_thread_is_in_wfe(cqctx, tid)) {
+ cmdq_thread_resume(cqctx, tid);
+ cmdq_thread_wait_end(cqctx, tid, end_pa);
+ status = cmdq_thread_suspend(cqctx, tid);
+ if (status < 0) {
+ spin_unlock_irqrestore(&cqctx->exec_lock,
+ flags);
+ return status;
+ }
+ /* set to task directly */
+ writel(task->mva_base,
+ gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ writel(task->mva_base + task->command_size,
+ gce_base + CMDQ_THR_END_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD] = task;
+ thread->task_count++;
+
+ /*
+ * case 3. If thread is still in WFE from previous task, clear
+ * WFE in new task and append to thread.
+ */
+ } else {
+ /* Current task that should be processed */
+ minimum = cmdq_thread_get_cookie(cqctx, tid) + 1;
+ if (minimum > CMDQ_MAX_COOKIE_VALUE)
+ minimum = 0;
+
+ /* Calculate loop count to adjust the tasks' order */
+ if (minimum <= cookie)
+ loop = cookie - minimum;
+ else
+ /* Counter wrapped */
+ loop = (CMDQ_MAX_COOKIE_VALUE - minimum + 1) +
+ cookie;
+
+ if (loop < 0) {
+ dev_err(dev, "reorder fail:\n");
+ dev_err(dev, " task count=%d\n", loop);
+ dev_err(dev, " thread=%d\n", tid);
+ dev_err(dev, " next cookie=%d\n",
+ thread->next_cookie);
+ dev_err(dev, " (HW) next cookie=%d\n",
+ minimum);
+ dev_err(dev, " task=0x%p\n", task);
+
+ spin_unlock_irqrestore(&cqctx->exec_lock,
+ flags);
+ return -EFAULT;
+ }
+
+ if (loop > CMDQ_MAX_TASK_IN_THREAD)
+ loop %= CMDQ_MAX_TASK_IN_THREAD;
+
+ status = cmdq_task_insert_into_thread(task, tid, loop);
+ if (status < 0) {
+ spin_unlock_irqrestore(
+ &cqctx->exec_lock, flags);
+ dev_err(dev,
+ "invalid task state for reorder.\n");
+ return status;
+ }
+
+ cmdq_task_remove_wfe(task);
+
+ smp_mb(); /* modify jump before enable thread */
+
+ writel(task->mva_base + task->command_size,
+ gce_base + CMDQ_THR_END_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ thread->task_count++;
+ }
+
+ thread->next_cookie += 1;
+ if (thread->next_cookie > CMDQ_MAX_COOKIE_VALUE)
+ thread->next_cookie = 0;
+
+ /* resume HW thread */
+ cmdq_thread_resume(cqctx, tid);
+ }
+
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+
+ return status;
+}
+
+static void cmdq_core_handle_error(struct cmdq *cqctx, int tid, int value)
+{
+ struct device *dev = cqctx->dev;
+ void __iomem *gce_base = cqctx->base;
+ struct cmdq_thread *thread;
+ struct cmdq_task *task;
+ int cookie;
+ int count;
+ int inner;
+ int status;
+ u32 curr_pa, end_pa;
+
+ curr_pa = readl(gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ end_pa = readl(gce_base + CMDQ_THR_END_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ dev_err(dev, "IRQ: error thread=%d, irq_flag=0x%x\n", tid, value);
+ dev_err(dev, "IRQ: Thread PC: 0x%08x, End PC:0x%08x\n",
+ curr_pa, end_pa);
+
+ thread = &cqctx->thread[tid];
+
+ cookie = cmdq_thread_get_cookie(cqctx, tid);
+
+ /*
+ * we assume error happens BEFORE EOC
+ * because it wouldn't be an error if this interrupt were issued by EOC,
+ * so we increment by 1 to locate the "current" task
+ */
+ cookie++;
+
+ /* set the issued task to error state */
+ if (thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD]) {
+ task = thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD];
+ task->irq_flag = value;
+ cmdq_thread_remove_task_by_index(
+ thread, cookie % CMDQ_MAX_TASK_IN_THREAD,
+ TASK_STATE_ERROR);
+ } else {
+ dev_err(dev,
+ "IRQ: can not find task in %s, pc:0x%08x, end_pc:0x%08x\n",
+ __func__, curr_pa, end_pa);
+ if (thread->task_count <= 0) {
+ /*
+ * suspend HW thread first,
+ * so that we work in a consistent state
+ * outer function should acquire spinlock:
+ * cqctx->exec_lock
+ */
+ status = cmdq_thread_suspend(cqctx, tid);
+ if (status < 0)
+ dev_err(dev, "IRQ: suspend HW thread failed!");
+
+ cmdq_thread_disable(cqctx, tid);
+ dev_err(dev,
+ "IRQ: there is no task for thread (%d) %s\n",
+ tid, __func__);
+ }
+ }
+
+ /* set the remaining tasks to done state */
+ if (thread->wait_cookie <= cookie) {
+ count = cookie - thread->wait_cookie + 1;
+ } else if ((cookie + 1) % (CMDQ_MAX_COOKIE_VALUE + 1) ==
+ thread->wait_cookie) {
+ count = 0;
+ } else {
+ /* counter wrapped */
+ count = (CMDQ_MAX_COOKIE_VALUE - thread->wait_cookie + 1) +
+ (cookie + 1);
+ dev_err(dev,
+ "IRQ: counter wrapped: wait cookie:%d, hw cookie:%d, count=%d",
+ thread->wait_cookie, cookie, count);
+ }
+
+ for (inner = (thread->wait_cookie % CMDQ_MAX_TASK_IN_THREAD); count > 0;
+ count--, inner++) {
+ if (inner >= CMDQ_MAX_TASK_IN_THREAD)
+ inner = 0;
+
+ if (thread->cur_task[inner]) {
+ task = thread->cur_task[inner];
+ task->irq_flag = 0; /* don't know irq flag */
+ /* still call isr_cb to prevent a lock-up */
+ if (task->cb.isr_cb)
+ task->cb.isr_cb(task->cb.isr_data);
+ cmdq_thread_remove_task_by_index(
+ thread, inner, TASK_STATE_DONE);
+ }
+ }
+
+ thread->wait_cookie = cookie + 1;
+ if (thread->wait_cookie > CMDQ_MAX_COOKIE_VALUE)
+ thread->wait_cookie -= (CMDQ_MAX_COOKIE_VALUE + 1);
+ /* min cookie value is 0 */
+
+ wake_up(&cqctx->wait_queue[tid]);
+}
+
+static void cmdq_core_handle_done(struct cmdq *cqctx, int tid, int value)
+{
+ struct device *dev = cqctx->dev;
+ struct cmdq_thread *thread = &cqctx->thread[tid];
+ int cookie = cmdq_thread_get_cookie(cqctx, tid);
+ int count;
+ int i;
+ struct cmdq_task *task;
+
+ if (thread->wait_cookie <= cookie) {
+ count = cookie - thread->wait_cookie + 1;
+ } else if ((cookie + 1) % (CMDQ_MAX_COOKIE_VALUE + 1) ==
+ thread->wait_cookie) {
+ count = 0;
+ } else {
+ /* counter wrapped */
+ count = (CMDQ_MAX_COOKIE_VALUE - thread->wait_cookie + 1) +
+ (cookie + 1);
+ dev_err(dev,
+ "IRQ: counter wrapped: wait cookie:%d, hw cookie:%d, count=%d",
+ thread->wait_cookie, cookie, count);
+ }
+
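+ /*
+ * Wrap-case example with assumed cookies: wait_cookie = 0xfffe and
+ * hw cookie = 1 gives count = (0xffff - 0xfffe + 1) + (1 + 1) = 4,
+ * i.e. tasks 0xfffe, 0xffff, 0x0 and 0x1 are marked done below.
+ */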
+ for (i = (thread->wait_cookie % CMDQ_MAX_TASK_IN_THREAD); count > 0;
+ count--, i++) {
+ if (i >= CMDQ_MAX_TASK_IN_THREAD)
+ i = 0;
+
+ if (thread->cur_task[i]) {
+ task = thread->cur_task[i];
+ task->irq_flag = value;
+ if (task->cb.isr_cb)
+ task->cb.isr_cb(task->cb.isr_data);
+ cmdq_thread_remove_task_by_index(
+ thread, i, TASK_STATE_DONE);
+ }
+ }
+
+ thread->wait_cookie = cookie + 1;
+ if (thread->wait_cookie > CMDQ_MAX_COOKIE_VALUE)
+ thread->wait_cookie -= (CMDQ_MAX_COOKIE_VALUE + 1);
+ /* min cookie value is 0 */
+
+ wake_up(&cqctx->wait_queue[tid]);
+}
+
+static void cmdq_core_handle_irq(struct cmdq *cqctx, int tid)
+{
+ struct device *dev = cqctx->dev;
+ void __iomem *gce_base = cqctx->base;
+ unsigned long flags = 0L;
+ int value;
+ int enabled;
+ int cookie;
+
+ /*
+ * normal execution: mark tasks done and remove them from the thread;
+ * also handle the "loop CB fail" case
+ */
+ spin_lock_irqsave(&cqctx->exec_lock, flags);
+
+ /*
+ * it is possible for another CPU core
+ * to run "release task" right before we acquire the spin lock
+ * and thus reset / disable this HW thread
+ * so we check both the IRQ flag and the enable bit of this thread
+ */
+ value = readl(gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ if (!(value & CMDQ_THR_IRQ_MASK)) {
+ dev_err(dev,
+ "IRQ: thread %d got interrupt but IRQ flag is 0x%08x\n",
+ tid, value);
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+ return;
+ }
+
+ enabled = readl(gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ if (!(enabled & CMDQ_THR_ENABLED)) {
+ dev_err(dev,
+ "IRQ: thread %d got interrupt already disabled 0x%08x\n",
+ tid, enabled);
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+ return;
+ }
+
+ /* read HW cookie here for printing message */
+ cookie = cmdq_thread_get_cookie(cqctx, tid);
+
+ /*
+ * Reset the IRQ status right after reading the HW cookie
+ * to prevent a race condition and save the cost of a suspend
+ */
+ writel(~value,
+ gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ if (value & CMDQ_THR_IRQ_ERROR)
+ cmdq_core_handle_error(cqctx, tid, value);
+ else if (value & CMDQ_THR_IRQ_DONE)
+ cmdq_core_handle_done(cqctx, tid, value);
+
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+}
+
+static int cmdq_task_exec_async(struct cmdq_task *task, int tid)
+{
+ struct device *dev = task->cqctx->dev;
+ int status;
+
+ status = cmdq_task_exec_async_impl(task, tid);
+ if (status >= 0)
+ return status;
+
+ if ((task->task_state == TASK_STATE_KILLED) ||
+ (task->task_state == TASK_STATE_ERROR)) {
+ dev_err(dev, "cmdq_task_exec_async_impl fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_core_consume_waiting_list(struct work_struct *work)
+{
+ struct list_head *p, *n = NULL;
+ bool thread_acquired;
+ ktime_t consume_time;
+ s64 waiting_time_ns;
+ bool need_log;
+ struct cmdq *cqctx;
+ struct device *dev;
+ u32 err_bits = 0;
+
+ cqctx = container_of(work, struct cmdq,
+ task_consume_wait_queue_item);
+ dev = cqctx->dev;
+
+ consume_time = ktime_get();
+
+ mutex_lock(&cqctx->task_mutex);
+
+ thread_acquired = false;
+
+ /* scan and remove (if executed) waiting tasks */
+ list_for_each_safe(p, n, &cqctx->task_wait_list) {
+ struct cmdq_task *task;
+ struct cmdq_thread *thread;
+ int tid;
+ int status;
+
+ task = list_entry(p, struct cmdq_task, list_entry);
+ tid = cmdq_eng_get_thread(task->engine_flag);
+
+ waiting_time_ns = ktime_to_ns(
+ ktime_sub(consume_time, task->submit));
+ need_log = waiting_time_ns >= CMDQ_PREALARM_TIMEOUT_NS;
+
+ /*
+ * Once a wait occurs, skip the following tasks
+ * to keep display tasks in order.
+ */
+ if (err_bits & BIT(tid))
+ continue;
+
+ /* acquire HW thread */
+ thread = cmdq_core_acquire_thread(cqctx, tid);
+ if (!thread) {
+ /* have to wait, remain in wait list */
+ dev_warn(dev, "acquire thread(%d) fail, need to wait\n",
+ tid);
+ if (need_log) /* task wait too long */
+ dev_warn(dev, "waiting:%lldns, task:0x%p\n",
+ waiting_time_ns, task);
+ err_bits |= BIT(tid);
+ continue;
+ }
+
+ /* some task is ready to run */
+ thread_acquired = true;
+
+ /*
+ * start execution
+ * remove from wait list and put into active list
+ */
+ list_move_tail(&task->list_entry,
+ &cqctx->task_active_list);
+
+ /* run task on thread */
+ status = cmdq_task_exec_async(task, tid);
+ if (status < 0) {
+ dev_err(dev, "%s fail, release task 0x%p\n",
+ __func__, task);
+ cmdq_task_remove_thread(task);
+ cmdq_task_release_unlocked(task);
+ task = NULL;
+ }
+ }
+
+ if (thread_acquired) {
+ /*
+ * notify some task's sw thread to change their waiting state.
+ * (if they have already called cmdq_task_wait_and_release())
+ */
+ wake_up_all(&cqctx->thread_dispatch_queue);
+ }
+
+ mutex_unlock(&cqctx->task_mutex);
+}
+
+static int cmdq_core_submit_task_async(struct cmdq_command *cmd_desc,
+ struct cmdq_task **task_out,
+ struct cmdq_task_cb *cb)
+{
+ struct cmdq *cqctx = cmd_desc->cqctx;
+
+ /* create a new task and put it at the tail of the waiting list */
+ *task_out = cmdq_core_acquire_task(cmd_desc, cb);
+
+ if (!(*task_out))
+ return -EFAULT;
+
+ /*
+ * Consume the waiting list.
+ * This may or may not execute the task, depending on available threads.
+ */
+ cmdq_core_consume_waiting_list(&cqctx->task_consume_wait_queue_item);
+
+ return 0;
+}
+
+static int cmdq_core_release_task(struct cmdq_task *task)
+{
+ struct cmdq *cqctx = task->cqctx;
+ int tid = task->thread;
+ unsigned long flags;
+ int status;
+
+ if (tid != CMDQ_INVALID_THREAD) {
+ struct cmdq_thread *thread = &cqctx->thread[tid];
+
+ /* this task is being executed (or queued) on a HW thread */
+
+ /* take the SW lock first to ensure atomic access to the HW */
+ spin_lock_irqsave(&cqctx->exec_lock, flags);
+ smp_mb(); /* make sure we access the HW atomically */
+
+ status = cmdq_thread_force_remove_task(task, tid);
+ if (thread->task_count > 0)
+ cmdq_thread_resume(cqctx, tid);
+
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+ wake_up(&cqctx->wait_queue[tid]);
+ }
+
+ cmdq_task_remove_thread(task);
+ cmdq_task_release_internal(task);
+ return 0;
+}
+
+struct cmdq_task_error_report {
+ bool throw_err;
+ const char *module;
+ u32 inst_a;
+ u32 inst_b;
+ u32 irq_flag;
+};
+
+static int cmdq_task_handle_error_result(
+ struct cmdq_task *task, int tid, int wait_q,
+ struct cmdq_task_error_report *error_report)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct device *dev = cqctx->dev;
+ void __iomem *gce_base = cqctx->base;
+ struct cmdq_thread *thread = &cqctx->thread[tid];
+ int status = 0;
+ int i;
+ bool is_err = false;
+ struct cmdq_task *next_task;
+ struct cmdq_task *prev_task;
+ int cookie;
+ unsigned long thread_pc;
+
+ dev_err(dev,
+ "task(0x%p) state is not TASK_STATE_DONE, but %d.\n",
+ task, task->task_state);
+
+ /*
+ * Oops, that task is not done.
+ * We have several possible error cases:
+ * 1. task still running (hang / timeout)
+ * 2. IRQ pending (done or error/timeout IRQ)
+ * 3. task's SW thread has been signaled (e.g. SIGKILL)
+ */
+
+ /*
+ * suspend HW thread first,
+ * so that we work in a consistent state
+ */
+ status = cmdq_thread_suspend(cqctx, tid);
+ if (status < 0)
+ error_report->throw_err = true;
+
+ /* The cookie of the task currently being processed */
+ cookie = cmdq_thread_get_cookie(cqctx, tid) + 1;
+ thread_pc = (unsigned long)readl(gce_base +
+ CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ /* process any pending IRQ */
+ error_report->irq_flag = readl(
+ gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ if (error_report->irq_flag & CMDQ_THR_IRQ_ERROR)
+ cmdq_core_handle_error(cqctx, tid, error_report->irq_flag);
+ else if (error_report->irq_flag & CMDQ_THR_IRQ_DONE)
+ cmdq_core_handle_done(cqctx, tid, error_report->irq_flag);
+
+ writel(~error_report->irq_flag,
+ gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+
+ /* check if this task has finished after handling pending IRQ */
+ if (task->task_state == TASK_STATE_DONE)
+ return 0;
+
+ /* Then decide whether this is a SW timeout or we were signaled (not an error) */
+ if (!wait_q) {
+ /* SW timeout and no IRQ received */
+ is_err = true;
+ dev_err(dev, "SW timeout of task 0x%p on tid %d\n",
+ task, tid);
+ error_report->throw_err = true;
+ cmdq_core_parse_error(task, tid,
+ &error_report->module,
+ &error_report->irq_flag,
+ &error_report->inst_a,
+ &error_report->inst_b);
+ status = -ETIMEDOUT;
+ } else if (wait_q < 0) {
+ /*
+ * Task is killed.
+ * Not an error, but we still need to remove the task.
+ */
+ is_err = false;
+
+ if (wait_q == -ERESTARTSYS)
+ dev_err(dev,
+ "Task 0x%p KILLED by wait_q = -ERESTARTSYS\n",
+ task);
+ else if (wait_q == -EINTR)
+ dev_err(dev,
+ "Task 0x%p KILLED by wait_q = -EINTR\n",
+ task);
+ else
+ dev_err(dev,
+ "Task 0x%p KILLED by wait_q = %d\n",
+ task, wait_q);
+
+ status = wait_q;
+ }
+
+ if (task->task_state == TASK_STATE_BUSY) {
+ /*
+ * if task_state is BUSY, this means we did not reach EOC
+ * and did not get an error IRQ, so:
+ * - remove the task from thread.cur_task[]
+ * - decrease thread.task_count
+ * NOTE: after this, cur_task[] no longer links to the task,
+ * and the task state becomes TASK_STATE_ERROR
+ * (or TASK_STATE_KILLED if it was signaled)
+ */
+
+ /* we find our place in thread->cur_task[]. */
+ for (i = 0; i < CMDQ_MAX_TASK_IN_THREAD; i++) {
+ if (thread->cur_task[i] == task) {
+ /* update task_count and cur_task[] */
+ cmdq_thread_remove_task_by_index(
+ thread, i, is_err ?
+ TASK_STATE_ERROR :
+ TASK_STATE_KILLED);
+ break;
+ }
+ }
+ }
+
+ next_task = NULL;
+
+ /* find the task's jump destination; next_task stays NULL if there is none */
+ if (task->va_base[task->num_cmd - 1] == CMDQ_JUMP_BY_PA)
+ next_task = cmdq_thread_search_task_by_pc(
+ thread,
+ task->va_base[task->num_cmd - 2]);
+
+ /*
+ * Then, we try to remove the task from the thread->cur_task chain:
+ * . if the HW PC falls in the task's range:
+ * . HW EXEC_CNT += 1
+ * . thread.wait_cookie += 1
+ * . set the HW PC to the next task's head
+ * . if not, find the previous task
+ * (whose jump address is task->mva_base):
+ * . check that the HW PC is not at the EOC/JUMP end
+ * . change the jump to a fake EOC (no IRQ)
+ * . insert a jump to the next task's head and grow the cmd buffer
+ * . if there is no next task, set the HW end address
+ */
+ if (task->num_cmd && thread_pc >= task->mva_base &&
+ thread_pc <= (task->mva_base + task->command_size)) {
+ if (next_task) {
+ /* cookie already +1 */
+ writel(cookie,
+ gce_base + CMDQ_THR_EXEC_CNT_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ thread->wait_cookie = cookie + 1;
+ writel(next_task->mva_base,
+ gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
+ CMDQ_THR_SHIFT * tid);
+ }
+ } else {
+ prev_task = NULL;
+ for (i = 0; i < CMDQ_MAX_TASK_IN_THREAD; i++) {
+ u32 *prev_va, *curr_va;
+ u32 prev_num, curr_num;
+
+ prev_task = thread->cur_task[i];
+ if (!prev_task)
+ continue;
+
+ prev_va = prev_task->va_base;
+ prev_num = prev_task->num_cmd;
+ if (!prev_num)
+ continue;
+
+ curr_va = task->va_base;
+ curr_num = task->num_cmd;
+
+ /* find which task JUMP into task */
+ if (prev_va[prev_num - 2] == task->mva_base &&
+ prev_va[prev_num - 1] == CMDQ_JUMP_BY_PA) {
+ /* Copy Jump instruction */
+ prev_va[prev_num - 2] =
+ curr_va[curr_num - 2];
+ prev_va[prev_num - 1] =
+ curr_va[curr_num - 1];
+
+ if (next_task)
+ cmdq_thread_reorder_task_array(
+ thread, i);
+
+ /*
+ * Give up fetched command,
+ * invoke CMDQ HW to re-fetch command.
+ */
+ cmdq_core_invalidate_hw_fetched_buffer(
+ cqctx, tid);
+
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+static int cmdq_task_wait_result(struct cmdq_task *task, int tid, int wait_q)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct cmdq_thread *thread = &cqctx->thread[tid];
+ int status = 0;
+ unsigned long flags;
+ struct cmdq_task_error_report error_report = { 0 };
+
+ /*
+ * Note that although we disable IRQs here, the HW continues to
+ * execute, so pending IRQs are still possible.
+ */
+ spin_lock_irqsave(&cqctx->exec_lock, flags);
+
+ if (task->task_state != TASK_STATE_DONE)
+ status = cmdq_task_handle_error_result(
+ task, tid, wait_q, &error_report);
+
+ if (thread->task_count <= 0)
+ cmdq_thread_disable(cqctx, tid);
+ else
+ cmdq_thread_resume(cqctx, tid);
+
+ spin_unlock_irqrestore(&cqctx->exec_lock, flags);
+
+ if (error_report.throw_err) {
+ u32 op = error_report.inst_a >> CMDQ_OP_CODE_SHIFT;
+
+ switch (op) {
+ case CMDQ_CODE_WFE:
+ dev_err(cqctx->dev,
+ "%s in CMDQ IRQ:0x%02x, INST:(0x%08x, 0x%08x), OP:WAIT EVENT:%s\n",
+ error_report.module, error_report.irq_flag,
+ error_report.inst_a, error_report.inst_b,
+ cmdq_event_get_name(error_report.inst_a &
+ CMDQ_ARG_A_MASK));
+ break;
+ default:
+ dev_err(cqctx->dev,
+ "%s in CMDQ IRQ:0x%02x, INST:(0x%08x, 0x%08x), OP:%s\n",
+ error_report.module, error_report.irq_flag,
+ error_report.inst_a, error_report.inst_b,
+ cmdq_core_parse_op(op));
+ break;
+ }
+ }
+
+ return status;
+}
+
+static int cmdq_task_wait_done(struct cmdq_task *task)
+{
+ struct cmdq *cqctx = task->cqctx;
+ struct device *dev = cqctx->dev;
+ int wait_q;
+ int tid;
+ unsigned long timeout = msecs_to_jiffies(
+ CMDQ_ACQUIRE_THREAD_TIMEOUT_MS);
+
+ /*
+ * wait for a thread to be acquired
+ * (done by cmdq_core_consume_waiting_list)
+ */
+ wait_q = wait_event_timeout(
+ cqctx->thread_dispatch_queue,
+ (task->thread != CMDQ_INVALID_THREAD), timeout);
+
+ if (!wait_q) {
+ mutex_lock(&cqctx->task_mutex);
+
+ /*
+ * The task may have been consumed just now,
+ * so check again.
+ */
+ if (task->thread == CMDQ_INVALID_THREAD) {
+ /*
+ * The task may have been released,
+ * or it starved to death.
+ */
+ dev_err(dev,
+ "task(0x%p) timeout with invalid thread\n",
+ task);
+
+ /*
+ * remove from waiting list,
+ * so that it won't be consumed in the future
+ */
+ list_del_init(&task->list_entry);
+
+ mutex_unlock(&cqctx->task_mutex);
+ return -EINVAL;
+ }
+
+ /* valid thread, so we keep going */
+ mutex_unlock(&cqctx->task_mutex);
+ }
+
+ tid = task->thread;
+ if (tid < 0 || tid >= CMDQ_MAX_THREAD_COUNT) {
+ dev_err(dev, "invalid thread %d in %s\n", tid, __func__);
+ return -EINVAL;
+ }
+
+ /* start to wait */
+ wait_q = wait_event_timeout(task->cqctx->wait_queue[tid],
+ (task->task_state != TASK_STATE_BUSY &&
+ task->task_state != TASK_STATE_WAITING),
+ msecs_to_jiffies(CMDQ_DEFAULT_TIMEOUT_MS));
+ if (!wait_q)
+ dev_dbg(dev, "timeout!\n");
+
+ /* wake up and continue */
+ return cmdq_task_wait_result(task, tid, wait_q);
+}
+
+static int cmdq_task_wait_and_release(struct cmdq_task *task)
+{
+ struct cmdq *cqctx;
+ int status;
+
+ if (!task) {
+ pr_err("%s err ptr=0x%p\n", __func__, task);
+ return -EFAULT;
+ }
+
+ if (task->task_state == TASK_STATE_IDLE) {
+ pr_err("%s task=0x%p is IDLE\n", __func__, task);
+ return -EFAULT;
+ }
+
+ cqctx = task->cqctx;
+
+ /* wait for task finish */
+ status = cmdq_task_wait_done(task);
+
+ /* release */
+ cmdq_task_remove_thread(task);
+ cmdq_task_release_internal(task);
+
+ return status;
+}
+
+static void cmdq_core_auto_release_work(struct work_struct *work_item)
+{
+ struct cmdq_task *task;
+ int status;
+ struct cmdq_task_cb cb;
+
+ task = container_of(work_item, struct cmdq_task, auto_release_work);
+ cb = task->cb;
+ status = cmdq_task_wait_and_release(task);
+
+ /*
+ * The ISR did not complete (error/timeout), so call isr_cb
+ * here to keep its waiters from getting stuck.
+ */
+ if (status && cb.isr_cb)
+ cb.isr_cb(cb.isr_data);
+
+ if (cb.done_cb)
+ cb.done_cb(cb.done_data);
+}
+
+static int cmdq_core_auto_release_task(struct cmdq_task *task)
+{
+ struct cmdq *cqctx = task->cqctx;
+
+ /*
+ * the work item is embedded in the task already,
+ * but we still need to initialize it
+ */
+ INIT_WORK(&task->auto_release_work, cmdq_core_auto_release_work);
+ queue_work(cqctx->task_auto_release_wq, &task->auto_release_work);
+ return 0;
+}
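+
+/*
+ * Flow sketch: cmdq_rec_flush() uses the synchronous path
+ * (cmdq_core_submit_task(), which waits and releases in the caller's
+ * context), while cmdq_rec_flush_async*() submits via
+ * cmdq_core_submit_task_async() and hands the same wait-and-release
+ * sequence to task_auto_release_wq above.
+ */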
+
+static int cmdq_core_submit_task(struct cmdq_command *cmd_desc)
+{
+ struct device *dev = cmd_desc->cqctx->dev;
+ int status;
+ struct cmdq_task *task;
+
+ status = cmdq_core_submit_task_async(cmd_desc, &task, NULL);
+ if (status < 0) {
+ dev_err(dev, "cmdq_core_submit_task_async failed=%d\n", status);
+ return status;
+ }
+
+ status = cmdq_task_wait_and_release(task);
+ if (status < 0)
+ dev_err(dev, "task(0x%p) wait fail\n", task);
+
+ return status;
+}
+
+static void cmdq_core_deinitialize(struct platform_device *pdev)
+{
+ struct cmdq *cqctx = platform_get_drvdata(pdev);
+ int i;
+ struct list_head *lists[] = {
+ &cqctx->task_free_list,
+ &cqctx->task_active_list,
+ &cqctx->task_wait_list
+ };
+
+ /*
+ * Directly destroy the auto release WQ
+ * since we're going to release tasks anyway.
+ */
+ destroy_workqueue(cqctx->task_auto_release_wq);
+ cqctx->task_auto_release_wq = NULL;
+
+ destroy_workqueue(cqctx->task_consume_wq);
+ cqctx->task_consume_wq = NULL;
+
+ /* release all tasks in these lists */
+ for (i = 0; i < ARRAY_SIZE(lists); i++) {
+ struct cmdq_task *task, *tmp;
+
+ list_for_each_entry_safe(task, tmp, lists[i], list_entry) {
+ /* unlink before freeing to avoid a use-after-free */
+ list_del(&task->list_entry);
+ cmdq_task_free_command_buffer(task);
+ kmem_cache_free(cqctx->task_cache, task);
+ }
+ }
+
+ kmem_cache_destroy(cqctx->task_cache);
+ cqctx->task_cache = NULL;
+
+ /* release command buffer pool */
+ cmdq_cmd_buf_pool_uninit(cqctx);
+}
+
+static irqreturn_t cmdq_irq_handler(int irq, void *dev)
+{
+ struct cmdq *cqctx = dev;
+ int i;
+ u32 irq_status;
+ bool handled = false;
+
+ irq_status = readl(cqctx->base + CMDQ_CURR_IRQ_STATUS_OFFSET);
+ irq_status &= CMDQ_IRQ_MASK;
+ for (i = 0;
+ irq_status != CMDQ_IRQ_MASK && i < CMDQ_MAX_THREAD_COUNT;
+ i++) {
+ /* STATUS bit set to 0 means IRQ asserted */
+ if (irq_status & BIT(i))
+ continue;
+
+ /*
+ * Set the bit back to 1 to mark this thread as
+ * handled, so we can early-exit once no more
+ * threads are asserted.
+ */
+ irq_status |= BIT(i);
+
+ cmdq_core_handle_irq(cqctx, i);
+ handled = true;
+ }
+
+ if (!handled)
+ return IRQ_NONE;
+
+ queue_work(cqctx->task_consume_wq,
+ &cqctx->task_consume_wait_queue_item);
+ return IRQ_HANDLED;
+}
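+
+/*
+ * Worked example (values are illustrative): the status bits are
+ * active-low. With CMDQ_IRQ_MASK = 0xffff, a raw status of 0xfffd
+ * (bit 1 cleared) means only HW thread 1 asserted its IRQ; the loop
+ * handles thread 1, sets bit 1 back to 1, and exits as soon as the
+ * value equals the mask again.
+ */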
+
+static int cmdq_core_initialize(struct platform_device *pdev,
+ struct cmdq **cqctx)
+{
+ struct cmdq *lcqctx; /* local cmdq context */
+ int i;
+ int ret = 0;
+
+ lcqctx = devm_kzalloc(&pdev->dev, sizeof(*lcqctx), GFP_KERNEL);
+ if (!lcqctx)
+ return -ENOMEM;
+
+ /* save dev */
+ lcqctx->dev = &pdev->dev;
+
+ /* initialize cmdq device-related data */
+ ret = cmdq_dev_init(pdev, lcqctx);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init cmdq device\n");
+ goto fail_dev;
+ }
+
+ /* initialize mutexes and spinlocks */
+ mutex_init(&lcqctx->task_mutex);
+ mutex_init(&lcqctx->clock_mutex);
+ spin_lock_init(&lcqctx->thread_lock);
+ spin_lock_init(&lcqctx->exec_lock);
+
+ /* initialize wait queues for notification */
+ for (i = 0; i < ARRAY_SIZE(lcqctx->wait_queue); i++)
+ init_waitqueue_head(&lcqctx->wait_queue[i]);
+ init_waitqueue_head(&lcqctx->thread_dispatch_queue);
+
+ /* create task pool */
+ lcqctx->task_cache = kmem_cache_create(
+ CMDQ_DRIVER_DEVICE_NAME "_task",
+ sizeof(struct cmdq_task),
+ __alignof__(struct cmdq_task),
+ SLAB_POISON | SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE,
+ &cmdq_task_ctor);
+ if (!lcqctx->task_cache) {
+ ret = -ENOMEM;
+ goto fail_dev;
+ }
+
+ /* initialize task lists */
+ INIT_LIST_HEAD(&lcqctx->task_free_list);
+ INIT_LIST_HEAD(&lcqctx->task_active_list);
+ INIT_LIST_HEAD(&lcqctx->task_wait_list);
+ INIT_WORK(&lcqctx->task_consume_wait_queue_item,
+ cmdq_core_consume_waiting_list);
+
+ /* initialize command buffer pool */
+ ret = cmdq_cmd_buf_pool_init(lcqctx);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init command buffer pool\n");
+ goto fail_cmd_buf_pool;
+ }
+
+ lcqctx->task_auto_release_wq = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM | WQ_HIGHPRI, "cmdq_auto_release");
+ lcqctx->task_consume_wq = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM | WQ_HIGHPRI, "cmdq_task");
+ if (!lcqctx->task_auto_release_wq || !lcqctx->task_consume_wq) {
+ ret = -ENOMEM;
+ goto fail_wq;
+ }
+
+ *cqctx = lcqctx;
+ return 0;
+
+fail_wq:
+ if (lcqctx->task_auto_release_wq)
+ destroy_workqueue(lcqctx->task_auto_release_wq);
+ if (lcqctx->task_consume_wq)
+ destroy_workqueue(lcqctx->task_consume_wq);
+ cmdq_cmd_buf_pool_uninit(lcqctx);
+
+fail_cmd_buf_pool:
+ kmem_cache_destroy(lcqctx->task_cache);
+
+fail_dev:
+ return ret;
+}
+
+static int cmdq_rec_realloc_cmd_buffer(struct cmdq_rec *handle, size_t size)
+{
+ void *new_buf;
+
+ new_buf = krealloc(handle->buf_ptr, size, GFP_KERNEL | __GFP_ZERO);
+ if (!new_buf)
+ return -ENOMEM;
+ handle->buf_ptr = new_buf;
+ handle->buf_size = size;
+ return 0;
+}
+
+static int cmdq_rec_stop_running_task(struct cmdq_rec *handle)
+{
+ int status;
+
+ status = cmdq_core_release_task(handle->running_task_ptr);
+ handle->running_task_ptr = NULL;
+ return status;
+}
+
+int cmdq_rec_create(struct device *dev, u64 engine_flag,
+ struct cmdq_rec **handle_ptr)
+{
+ struct cmdq *cqctx;
+ struct cmdq_rec *handle;
+ int ret;
+
+ cqctx = dev_get_drvdata(dev);
+ if (!cqctx) {
+ dev_err(dev, "cmdq context is NULL\n");
+ return -EINVAL;
+ }
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->cqctx = cqctx;
+ handle->engine_flag = engine_flag;
+
+ ret = cmdq_rec_realloc_cmd_buffer(handle, CMDQ_INITIAL_CMD_BLOCK_SIZE);
+ if (ret) {
+ kfree(handle);
+ return ret;
+ }
+
+ *handle_ptr = handle;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_rec_create);
+
+static int cmdq_rec_append_command(struct cmdq_rec *handle,
+ enum cmdq_code code,
+ u32 arg_a, u32 arg_b)
+{
+ struct cmdq *cqctx;
+ struct device *dev;
+ int subsys;
+ u32 *cmd_ptr;
+ int ret;
+
+ cqctx = handle->cqctx;
+ dev = cqctx->dev;
+
+ if (handle->finalized) {
+ dev_err(dev,
+ "already finalized record (cannot add more commands)");
+ dev_err(dev, "handle=0x%p, tid=%d\n", handle, current->pid);
+ return -EBUSY;
+ }
+
+ /* check if we have a sufficient buffer size */
+ if (unlikely(handle->block_size + CMDQ_INST_SIZE > handle->buf_size)) {
+ ret = cmdq_rec_realloc_cmd_buffer(handle, handle->buf_size * 2);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * compute the write position only after the possible realloc
+ * above, since krealloc may have moved the buffer
+ */
+ cmd_ptr = (u32 *)((u8 *)handle->buf_ptr + handle->block_size);
+
+ switch (code) {
+ case CMDQ_CODE_MOVE:
+ cmd_ptr[0] = arg_b;
+ cmd_ptr[1] = (CMDQ_CODE_MOVE << CMDQ_OP_CODE_SHIFT) |
+ (arg_a & CMDQ_ARG_A_MASK);
+ break;
+ case CMDQ_CODE_WRITE:
+ subsys = cmdq_subsys_from_phys_addr(cqctx, arg_a);
+ if (subsys < 0) {
+ dev_err(dev,
+ "unsupported memory base address 0x%08x\n",
+ arg_a);
+ return -EFAULT;
+ }
+
+ cmd_ptr[0] = arg_b;
+ cmd_ptr[1] = (CMDQ_CODE_WRITE << CMDQ_OP_CODE_SHIFT) |
+ (arg_a & CMDQ_ARG_A_WRITE_MASK) |
+ ((subsys & CMDQ_SUBSYS_MASK) << CMDQ_SUBSYS_SHIFT);
+ break;
+ case CMDQ_CODE_JUMP:
+ cmd_ptr[0] = arg_b;
+ cmd_ptr[1] = (CMDQ_CODE_JUMP << CMDQ_OP_CODE_SHIFT) |
+ (arg_a & CMDQ_ARG_A_MASK);
+ break;
+ case CMDQ_CODE_WFE:
+ /*
+ * bit 0-11: wait_value, 1
+ * bit 15: to_wait, true
+ * bit 16-27: update_value, 0
+ * bit 31: to_update, true
+ */
+ cmd_ptr[0] = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT |
+ CMDQ_WFE_WAIT_VALUE;
+ cmd_ptr[1] = (CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) | arg_a;
+ break;
+ case CMDQ_CODE_CLEAR_EVENT:
+ /*
+ * bit 0-11: wait_value, 0
+ * bit 15: to_wait, false
+ * bit 16-27: update_value, 0
+ * bit 31: to_update, true
+ */
+ cmd_ptr[0] = CMDQ_WFE_UPDATE;
+ cmd_ptr[1] = (CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) | arg_a;
+ break;
+ case CMDQ_CODE_EOC:
+ cmd_ptr[0] = arg_b;
+ cmd_ptr[1] = (CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT) |
+ (arg_a & CMDQ_ARG_A_MASK);
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ handle->block_size += CMDQ_INST_SIZE;
+
+ return 0;
+}
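+
+/*
+ * Encoding example (illustrative): cmdq_rec_wait() on event 53
+ * (CMDQ_EVENT_MUTEX0_STREAM_EOF) assembles the 64-bit instruction
+ *
+ *   arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE
+ *         = 0x80008001
+ *   arg_a = (CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) | 53 = 0x20000035
+ *
+ * i.e. "wait until event 53 becomes 1, then atomically clear it".
+ */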
+
+int cmdq_rec_reset(struct cmdq_rec *handle)
+{
+ if (handle->running_task_ptr)
+ cmdq_rec_stop_running_task(handle);
+
+ handle->block_size = 0;
+ handle->finalized = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_rec_reset);
+
+int cmdq_rec_write(struct cmdq_rec *handle, u32 value, u32 addr)
+{
+ return cmdq_rec_append_command(handle, CMDQ_CODE_WRITE, addr, value);
+}
+EXPORT_SYMBOL(cmdq_rec_write);
+
+int cmdq_rec_write_mask(struct cmdq_rec *handle, u32 value,
+ u32 addr, u32 mask)
+{
+ int ret;
+
+ if (mask != 0xffffffff) {
+ ret = cmdq_rec_append_command(handle, CMDQ_CODE_MOVE, 0, ~mask);
+ if (ret)
+ return ret;
+
+ addr = addr | CMDQ_ENABLE_MASK;
+ }
+
+ return cmdq_rec_append_command(handle, CMDQ_CODE_WRITE, addr, value);
+}
+EXPORT_SYMBOL(cmdq_rec_write_mask);
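+
+/*
+ * Expansion example (register address and value are illustrative):
+ *
+ *   cmdq_rec_write_mask(handle, 0x12, 0x14000050, 0x0000ffff);
+ *
+ * records two instructions: a MOVE of ~mask (0xffff0000) into the GCE
+ * mask register, then a WRITE of 0x12 to 0x14000050 with
+ * CMDQ_ENABLE_MASK set in arg_a so the HW applies the mask.
+ */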
+
+int cmdq_rec_wait(struct cmdq_rec *handle, enum cmdq_event event)
+{
+ if (event == CMDQ_SYNC_TOKEN_INVALID || event >= CMDQ_SYNC_TOKEN_MAX ||
+ event < 0)
+ return -EINVAL;
+
+ return cmdq_rec_append_command(handle, CMDQ_CODE_WFE, event, 0);
+}
+EXPORT_SYMBOL(cmdq_rec_wait);
+
+int cmdq_rec_clear_event(struct cmdq_rec *handle, enum cmdq_event event)
+{
+ if (event == CMDQ_SYNC_TOKEN_INVALID || event >= CMDQ_SYNC_TOKEN_MAX ||
+ event < 0)
+ return -EINVAL;
+
+ return cmdq_rec_append_command(handle, CMDQ_CODE_CLEAR_EVENT, event, 0);
+}
+EXPORT_SYMBOL(cmdq_rec_clear_event);
+
+static int cmdq_rec_finalize_command(struct cmdq_rec *handle)
+{
+ int status;
+ struct device *dev;
+ u32 arg_b;
+
+ dev = handle->cqctx->dev;
+
+ if (!handle->finalized) {
+ /* insert EOC and generate IRQ for each command iteration */
+ arg_b = CMDQ_EOC_IRQ_EN;
+ status = cmdq_rec_append_command(handle, CMDQ_CODE_EOC,
+ 0, arg_b);
+ if (status)
+ return status;
+
+ /*
+ * terminating JUMP by offset 8 (pass); the core rewrites
+ * it to jump to the next task's buffer when chaining
+ */
+ status = cmdq_rec_append_command(handle, CMDQ_CODE_JUMP, 0, 8);
+ if (status)
+ return status;
+
+ handle->finalized = true;
+ }
+
+ return 0;
+}
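+
+/*
+ * Resulting buffer layout (sketch): after finalization every recorded
+ * command block ends with
+ *
+ *   [user cmds ...][EOC, CMDQ_EOC_IRQ_EN][JUMP by offset, +8]
+ *
+ * The EOC raises the done IRQ; the trailing JUMP is the "pass" pattern
+ * (CMDQ_JUMP_BY_OFFSET / CMDQ_JUMP_PASS) recognized as the end of the
+ * task chain.
+ */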
+
+static int cmdq_rec_fill_cmd_desc(struct cmdq_rec *handle,
+ struct cmdq_command *desc)
+{
+ int ret;
+
+ ret = cmdq_rec_finalize_command(handle);
+ if (ret)
+ return ret;
+
+ desc->cqctx = handle->cqctx;
+ desc->engine_flag = handle->engine_flag;
+ desc->va_base = handle->buf_ptr;
+ desc->block_size = handle->block_size;
+
+ return ret;
+}
+
+int cmdq_rec_flush(struct cmdq_rec *handle)
+{
+ int ret;
+ struct cmdq_command desc;
+
+ ret = cmdq_rec_fill_cmd_desc(handle, &desc);
+ if (ret)
+ return ret;
+
+ return cmdq_core_submit_task(&desc);
+}
+EXPORT_SYMBOL(cmdq_rec_flush);
+
+static int cmdq_rec_flush_async_cb(struct cmdq_rec *handle,
+ cmdq_async_flush_cb isr_cb,
+ void *isr_data,
+ cmdq_async_flush_cb done_cb,
+ void *done_data)
+{
+ int ret;
+ struct cmdq_command desc;
+ struct cmdq_task *task;
+ struct cmdq_task_cb cb;
+
+ ret = cmdq_rec_fill_cmd_desc(handle, &desc);
+ if (ret)
+ return ret;
+
+ cb.isr_cb = isr_cb;
+ cb.isr_data = isr_data;
+ cb.done_cb = done_cb;
+ cb.done_data = done_data;
+
+ ret = cmdq_core_submit_task_async(&desc, &task, &cb);
+ if (ret)
+ return ret;
+
+ ret = cmdq_core_auto_release_task(task);
+
+ return ret;
+}
+
+int cmdq_rec_flush_async(struct cmdq_rec *handle)
+{
+ return cmdq_rec_flush_async_cb(handle, NULL, NULL, NULL, NULL);
+}
+EXPORT_SYMBOL(cmdq_rec_flush_async);
+
+int cmdq_rec_flush_async_callback(struct cmdq_rec *handle,
+ cmdq_async_flush_cb isr_cb,
+ void *isr_data,
+ cmdq_async_flush_cb done_cb,
+ void *done_data)
+{
+ return cmdq_rec_flush_async_cb(handle, isr_cb, isr_data,
+ done_cb, done_data);
+}
+EXPORT_SYMBOL(cmdq_rec_flush_async_callback);
+
+void cmdq_rec_destroy(struct cmdq_rec *handle)
+{
+ if (handle->running_task_ptr)
+ cmdq_rec_stop_running_task(handle);
+
+ /* free command buffer */
+ kfree(handle->buf_ptr);
+ handle->buf_ptr = NULL;
+
+ /* free command handle */
+ kfree(handle);
+}
+EXPORT_SYMBOL(cmdq_rec_destroy);
+
+static int cmdq_probe(struct platform_device *pdev)
+{
+ struct cmdq *cqctx;
+ int ret;
+
+ /* init cmdq context, and save it */
+ ret = cmdq_core_initialize(pdev, &cqctx);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init cmdq context\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, cqctx);
+
+ ret = devm_request_irq(&pdev->dev, cqctx->irq, cmdq_irq_handler,
+ IRQF_TRIGGER_LOW | IRQF_SHARED,
+ CMDQ_DRIVER_DEVICE_NAME, cqctx);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ISR (%d)\n", ret);
+ goto fail;
+ }
+
+ cqctx->clock = devm_clk_get(&pdev->dev, CMDQ_CLK_NAME);
+ if (IS_ERR(cqctx->clock)) {
+ dev_err(&pdev->dev, "failed to get clk:%s\n", CMDQ_CLK_NAME);
+ ret = PTR_ERR(cqctx->clock);
+ goto fail;
+ }
+
+ return ret;
+
+fail:
+ cmdq_core_deinitialize(pdev);
+ return ret;
+}
+
+static int cmdq_remove(struct platform_device *pdev)
+{
+ cmdq_core_deinitialize(pdev);
+ return 0;
+}
+
+static const struct of_device_id cmdq_of_ids[] = {
+ {.compatible = "mediatek,mt8173-gce",},
+ {}
+};
+
+static struct platform_driver cmdq_drv = {
+ .probe = cmdq_probe,
+ .remove = cmdq_remove,
+ .driver = {
+ .name = CMDQ_DRIVER_DEVICE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cmdq_of_ids,
+ }
+};
+
+builtin_platform_driver(cmdq_drv);
diff --git a/include/soc/mediatek/cmdq.h b/include/soc/mediatek/cmdq.h
new file mode 100644
index 0000000..29931c9
--- /dev/null
+++ b/include/soc/mediatek/cmdq.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_CMDQ_H__
+#define __MTK_CMDQ_H__
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+enum cmdq_eng {
+ CMDQ_ENG_DISP_AAL,
+ CMDQ_ENG_DISP_COLOR0,
+ CMDQ_ENG_DISP_COLOR1,
+ CMDQ_ENG_DISP_DPI0,
+ CMDQ_ENG_DISP_DSI0,
+ CMDQ_ENG_DISP_DSI1,
+ CMDQ_ENG_DISP_GAMMA,
+ CMDQ_ENG_DISP_OD,
+ CMDQ_ENG_DISP_OVL0,
+ CMDQ_ENG_DISP_OVL1,
+ CMDQ_ENG_DISP_PWM0,
+ CMDQ_ENG_DISP_PWM1,
+ CMDQ_ENG_DISP_RDMA0,
+ CMDQ_ENG_DISP_RDMA1,
+ CMDQ_ENG_DISP_RDMA2,
+ CMDQ_ENG_DISP_UFOE,
+ CMDQ_ENG_DISP_WDMA0,
+ CMDQ_ENG_DISP_WDMA1,
+ CMDQ_ENG_MAX,
+};
+
+/* events for CMDQ and display */
+enum cmdq_event {
+ /* Display start of frame(SOF) events */
+ CMDQ_EVENT_DISP_OVL0_SOF = 11,
+ CMDQ_EVENT_DISP_OVL1_SOF = 12,
+ CMDQ_EVENT_DISP_RDMA0_SOF = 13,
+ CMDQ_EVENT_DISP_RDMA1_SOF = 14,
+ CMDQ_EVENT_DISP_RDMA2_SOF = 15,
+ CMDQ_EVENT_DISP_WDMA0_SOF = 16,
+ CMDQ_EVENT_DISP_WDMA1_SOF = 17,
+ /* Display end of frame(EOF) events */
+ CMDQ_EVENT_DISP_OVL0_EOF = 39,
+ CMDQ_EVENT_DISP_OVL1_EOF = 40,
+ CMDQ_EVENT_DISP_RDMA0_EOF = 41,
+ CMDQ_EVENT_DISP_RDMA1_EOF = 42,
+ CMDQ_EVENT_DISP_RDMA2_EOF = 43,
+ CMDQ_EVENT_DISP_WDMA0_EOF = 44,
+ CMDQ_EVENT_DISP_WDMA1_EOF = 45,
+ /* Mutex end of frame(EOF) events */
+ CMDQ_EVENT_MUTEX0_STREAM_EOF = 53,
+ CMDQ_EVENT_MUTEX1_STREAM_EOF = 54,
+ CMDQ_EVENT_MUTEX2_STREAM_EOF = 55,
+ CMDQ_EVENT_MUTEX3_STREAM_EOF = 56,
+ CMDQ_EVENT_MUTEX4_STREAM_EOF = 57,
+ /* Display underrun events */
+ CMDQ_EVENT_DISP_RDMA0_UNDERRUN = 63,
+ CMDQ_EVENT_DISP_RDMA1_UNDERRUN = 64,
+ CMDQ_EVENT_DISP_RDMA2_UNDERRUN = 65,
+ /* Keep this at the end of HW events */
+ CMDQ_MAX_HW_EVENT_COUNT = 260,
+ /* The maximum event value; it can also be used as a mask. */
+ CMDQ_SYNC_TOKEN_MAX = 0x1ff,
+ /* Invalid event */
+ CMDQ_SYNC_TOKEN_INVALID = -1,
+};
+
+/* called after isr done or task done */
+typedef int (*cmdq_async_flush_cb)(void *data);
+
+struct cmdq_task;
+struct cmdq;
+
+struct cmdq_rec {
+ struct cmdq *cqctx;
+ u64 engine_flag;
+ size_t block_size; /* command size */
+ void *buf_ptr;
+ size_t buf_size;
+ /* running task after flush */
+ struct cmdq_task *running_task_ptr;
+ bool finalized;
+};
+
+/**
+ * cmdq_rec_create() - create command queue record handle
+ * @dev: device
+ * @engine_flag: command queue engine flag
+ * @handle_ptr: command queue record handle pointer to retrieve cmdq_rec
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_rec_create(struct device *dev, u64 engine_flag,
+ struct cmdq_rec **handle_ptr);
+
+/**
+ * cmdq_rec_reset() - reset command queue record commands
+ * @handle: the command queue record handle
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_rec_reset(struct cmdq_rec *handle);
+
+/**
+ * cmdq_rec_write() - append write command to the command queue record
+ * @handle: the command queue record handle
+ * @value: the specified target register value
+ * @addr: the specified target register physical address
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_rec_write(struct cmdq_rec *handle, u32 value, u32 addr);
+
+/**
+ * cmdq_rec_write_mask() - append write command with mask to the command queue
+ * record
+ * @handle: the command queue record handle
+ * @value: the specified target register value
+ * @addr: the specified target register physical address
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_rec_write_mask(struct cmdq_rec *handle, u32 value,
+ u32 addr, u32 mask);
+
+/**
+ * cmdq_rec_wait() - append wait command to the command queue record
+ * @handle: the command queue record handle
+ * @event: the desired event type to "wait and CLEAR"
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_rec_wait(struct cmdq_rec *handle, enum cmdq_event event);
+
+/**
+ * cmdq_rec_clear_event() - append clear event command to the command queue
+ * record
+ * @handle: the command queue record handle
+ * @event: the desired event to be cleared
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_rec_clear_event(struct cmdq_rec *handle, enum cmdq_event event);
+
+/**
+ * cmdq_rec_flush() - trigger CMDQ to execute the recorded commands
+ * @handle: the command queue record handle
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Trigger CMDQ to execute the recorded commands. Note that this is a
+ * synchronous flush function. When the function returns, the recorded
+ * commands have finished execution.
+ */
+int cmdq_rec_flush(struct cmdq_rec *handle);
+
+/**
+ * cmdq_rec_flush_async() - trigger CMDQ to asynchronously execute the
+ * recorded commands
+ * @handle: the command queue record handle
+ *
+ * Return: 0 if execution started successfully; else the error code is returned
+ *
+ * Trigger CMDQ to asynchronously execute the recorded commands. Note that
+ * this is an ASYNC function. When the function returns, execution may or
+ * may not have finished. There is no way to retrieve the result.
+ */
+int cmdq_rec_flush_async(struct cmdq_rec *handle);
+
+/**
+ * cmdq_rec_flush_async_callback() - trigger CMDQ to asynchronously execute
+ * the recorded commands and call back after
+ * ISR is finished and this flush is finished
+ * @handle: the command queue record handle
+ * @isr_cb: called by ISR in the end of CMDQ ISR
+ * @isr_data: this data will pass back to isr_cb
+ * @done_cb: called after flush is done
+ * @done_data: this data will pass back to done_cb
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Trigger CMDQ to asynchronously execute the recorded commands and call back
+ * after the ISR has finished and this flush is done. Note that this is an
+ * ASYNC function: when it returns, execution may or may not have finished.
+ * The ISR callback runs at the end of the ISR, and the done callback runs
+ * after all commands have completed.
+ */
+int cmdq_rec_flush_async_callback(struct cmdq_rec *handle,
+ cmdq_async_flush_cb isr_cb,
+ void *isr_data,
+ cmdq_async_flush_cb done_cb,
+ void *done_data);
+
+/**
+ * cmdq_rec_destroy() - destroy command queue record handle
+ * @handle: the command queue record handle
+ */
+void cmdq_rec_destroy(struct cmdq_rec *handle);
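+
+/*
+ * Typical usage (illustrative sketch; the device pointer, engine flag,
+ * register address, value and mask below are placeholders):
+ *
+ *	struct cmdq_rec *rec;
+ *	int err;
+ *
+ *	err = cmdq_rec_create(dev, BIT_ULL(CMDQ_ENG_DISP_OVL0), &rec);
+ *	if (err)
+ *		return err;
+ *	cmdq_rec_clear_event(rec, CMDQ_EVENT_MUTEX0_STREAM_EOF);
+ *	cmdq_rec_write_mask(rec, 0x1, 0x14000050, 0x1);
+ *	cmdq_rec_wait(rec, CMDQ_EVENT_MUTEX0_STREAM_EOF);
+ *	err = cmdq_rec_flush(rec);
+ *	cmdq_rec_destroy(rec);
+ */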
+
+#endif /* __MTK_CMDQ_H__ */
--
2.7.4
* Re: [RFC 01/11] soc: mediatek: MediaTek Command Queue (CMDQ) driver
2017-09-29 13:09 ` [RFC 01/11] soc: mediatek: MediaTek Command Queue (CMDQ) driver Ulrich Hecht
@ 2017-10-02 10:24 ` CK Hu
0 siblings, 0 replies; 17+ messages in thread
From: CK Hu @ 2017-10-02 10:24 UTC (permalink / raw)
To: Ulrich Hecht
Cc: jacopo, magnus.damm, dri-devel, linux-mediatek, laurent.pinchart,
linux-arm-kernel
Hi, Ulrich:
This patch looks outdated. Please follow up with the newer series at [1].
[1]
http://lists.infradead.org/pipermail/linux-mediatek/2017-January/007654.html
Regards,
CK
On Fri, 2017-09-29 at 15:09 +0200, Ulrich Hecht wrote:
> Ported from chromeos-3.18 kernel.
>
> Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas@gmail.com>
> ---
> drivers/soc/mediatek/Kconfig | 10 +
> drivers/soc/mediatek/Makefile | 1 +
> drivers/soc/mediatek/mtk-cmdq.c | 2814 +++++++++++++++++++++++++++++++++++++++
> include/soc/mediatek/cmdq.h | 211 +++
> 4 files changed, 3036 insertions(+)
> create mode 100644 drivers/soc/mediatek/mtk-cmdq.c
> create mode 100644 include/soc/mediatek/cmdq.h
>
> diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
> index 609bb34..ef271e0 100644
> --- a/drivers/soc/mediatek/Kconfig
> +++ b/drivers/soc/mediatek/Kconfig
> @@ -1,6 +1,16 @@
> #
> # MediaTek SoC drivers
> #
> +config MTK_CMDQ
> + bool "MediaTek CMDQ Support"
> + depends on ARCH_MEDIATEK || COMPILE_TEST
> + select MTK_INFRACFG
> + help
> + Say yes here to add support for the MediaTek Command Queue (CMDQ)
> + driver. The CMDQ is used to help read/write registers with critical
> + time limitation, such as updating display configuration during the
> + vblank.
> +
> config MTK_INFRACFG
> bool "MediaTek INFRACFG Support"
> depends on ARCH_MEDIATEK || COMPILE_TEST
> diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
> index 12998b0..f7397ef 100644
> --- a/drivers/soc/mediatek/Makefile
> +++ b/drivers/soc/mediatek/Makefile
> @@ -1,3 +1,4 @@
> +obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq.o
> obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
> obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
> obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
> diff --git a/drivers/soc/mediatek/mtk-cmdq.c b/drivers/soc/mediatek/mtk-cmdq.c
> new file mode 100644
> index 0000000..a8bfb5c
> --- /dev/null
> +++ b/drivers/soc/mediatek/mtk-cmdq.c
> @@ -0,0 +1,2814 @@
> +/*
> + * Copyright (c) 2015 MediaTek Inc.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + */
> +
> +#include <linux/clk.h>
> +#include <linux/clk-provider.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/errno.h>
> +#include <linux/interrupt.h>
> +#include <linux/iopoll.h>
> +#include <linux/kernel.h>
> +#include <linux/kthread.h>
> +#include <linux/ktime.h>
> +#include <linux/module.h>
> +#include <linux/mutex.h>
> +#include <linux/of_irq.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/workqueue.h>
> +#include <soc/mediatek/cmdq.h>
> +
> +/*
> + * Please calculate this value for each platform.
> + * task number = vblank time / ((task cmds * cmd ticks) / GCE freq)
> + */
> +#define CMDQ_MAX_TASK_IN_THREAD 70
> +
> +#define CMDQ_INITIAL_CMD_BLOCK_SIZE PAGE_SIZE
> +#define CMDQ_CMD_BUF_POOL_BUF_SIZE PAGE_SIZE
> +#define CMDQ_CMD_BUF_POOL_BUF_NUM 140 /* 2 * 70 = 140 */
> +#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */
> +
> +/*
> + * cmdq_thread cookie value is from 0 to CMDQ_MAX_COOKIE_VALUE.
> + * And, this value also be used as MASK.
> + */
> +#define CMDQ_MAX_COOKIE_VALUE 0xffff
> +#define CMDQ_COOKIE_MASK CMDQ_MAX_COOKIE_VALUE
> +
> +#define CMDQ_DEFAULT_TIMEOUT_MS 1000
> +#define CMDQ_ACQUIRE_THREAD_TIMEOUT_MS 5000
> +#define CMDQ_PREALARM_TIMEOUT_NS 200000000
> +
> +#define CMDQ_INVALID_THREAD -1
> +
> +#define CMDQ_DRIVER_DEVICE_NAME "mtk_cmdq"
> +
> +#define CMDQ_CLK_NAME "gce"
> +
> +#define CMDQ_CURR_IRQ_STATUS_OFFSET 0x010
> +#define CMDQ_CURR_LOADED_THR_OFFSET 0x018
> +#define CMDQ_THR_SLOT_CYCLES_OFFSET 0x030
> +#define CMDQ_THR_EXEC_CYCLES_OFFSET 0x034
> +#define CMDQ_THR_TIMEOUT_TIMER_OFFSET 0x038
> +#define CMDQ_BUS_CONTROL_TYPE_OFFSET 0x040
> +
> +#define CMDQ_SYNC_TOKEN_ID_OFFSET 0x060
> +#define CMDQ_SYNC_TOKEN_VAL_OFFSET 0x064
> +#define CMDQ_SYNC_TOKEN_UPD_OFFSET 0x068
> +
> +#define CMDQ_GPR_SHIFT 0x004
> +#define CMDQ_GPR_OFFSET 0x080
> +
> +#define CMDQ_THR_SHIFT 0x080
> +#define CMDQ_THR_WARM_RESET_OFFSET 0x100
> +#define CMDQ_THR_ENABLE_TASK_OFFSET 0x104
> +#define CMDQ_THR_SUSPEND_TASK_OFFSET 0x108
> +#define CMDQ_THR_CURR_STATUS_OFFSET 0x10c
> +#define CMDQ_THR_IRQ_STATUS_OFFSET 0x110
> +#define CMDQ_THR_IRQ_ENABLE_OFFSET 0x114
> +#define CMDQ_THR_CURR_ADDR_OFFSET 0x120
> +#define CMDQ_THR_END_ADDR_OFFSET 0x124
> +#define CMDQ_THR_EXEC_CNT_OFFSET 0x128
> +#define CMDQ_THR_WAIT_TOKEN_OFFSET 0x130
> +#define CMDQ_THR_CFG_OFFSET 0x140
> +#define CMDQ_THR_INST_CYCLES_OFFSET 0x150
> +#define CMDQ_THR_INST_THRESX_OFFSET 0x154
> +#define CMDQ_THR_STATUS_OFFSET 0x18c
> +
> +#define CMDQ_SYNC_TOKEN_SET BIT(16)
> +#define CMDQ_IRQ_MASK 0xffff
> +
> +#define CMDQ_THR_ENABLED 0x1
> +#define CMDQ_THR_DISABLED 0x0
> +#define CMDQ_THR_SUSPEND 0x1
> +#define CMDQ_THR_RESUME 0x0
> +#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
> +#define CMDQ_THR_WARM_RESET BIT(0)
> +#define CMDQ_THR_SLOT_CYCLES 0x3200
> +#define CMDQ_THR_NO_TIMEOUT 0x0
> +#define CMDQ_THR_PRIORITY 3
> +#define CMDQ_THR_IRQ_DONE 0x1
> +#define CMDQ_THR_IRQ_ERROR 0x12
> +#define CMDQ_THR_IRQ_EN 0x13 /* done + error */
> +#define CMDQ_THR_IRQ_MASK 0x13
> +#define CMDQ_THR_EXECUTING BIT(31)
> +#define CMDQ_THR_IS_WAITING BIT(31)
> +
> +#define CMDQ_ARG_A_MASK 0xffffff
> +#define CMDQ_ARG_A_WRITE_MASK 0xffff
> +#define CMDQ_ARG_A_SUBSYS_MASK 0x1f0000
> +#define CMDQ_SUBSYS_MASK 0x1f
> +
> +#define CMDQ_OP_CODE_SHIFT 24
> +#define CMDQ_SUBSYS_SHIFT 16
> +
> +#define CMDQ_JUMP_BY_OFFSET 0x10000000
> +#define CMDQ_JUMP_BY_PA 0x10000001
> +#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
> +
> +#define CMDQ_WFE_UPDATE BIT(31)
> +#define CMDQ_WFE_WAIT BIT(15)
> +#define CMDQ_WFE_WAIT_VALUE 0x1
> +
> +#define CMDQ_MARK_NON_SUSPENDABLE BIT(21) /* 53 - 32 = 21 */
> +#define CMDQ_MARK_NOT_ADD_COUNTER BIT(16) /* 48 - 32 = 16 */
> +#define CMDQ_MARK_PREFETCH_MARKER BIT(20)
> +#define CMDQ_MARK_PREFETCH_MARKER_EN BIT(17)
> +#define CMDQ_MARK_PREFETCH_EN BIT(16)
> +
> +#define CMDQ_EOC_IRQ_EN BIT(0)
> +
> +#define CMDQ_ENABLE_MASK BIT(0)
> +
> +#define CMDQ_OP_CODE_MASK 0xff000000
> +
> +enum cmdq_thread_index {
> + CMDQ_THR_DISP_DSI0 = 0, /* main: dsi0 */
> + CMDQ_THR_DISP_DPI0, /* sub: dpi0 */
> + CMDQ_MAX_THREAD_COUNT, /* max */
> +};
> +
> +struct cmdq_command {
> + struct cmdq *cqctx;
> + /* bit flag of used engines */
> + u64 engine_flag;
> + /*
> + * pointer of instruction buffer
> + * This must point to an 64-bit aligned u32 array
> + */
> + u32 *va_base;
> + /* size of instruction buffer, in bytes. */
> + size_t block_size;
> +};
> +
> +enum cmdq_code {
> + /* These are actual HW op code. */
> + CMDQ_CODE_MOVE = 0x02,
> + CMDQ_CODE_WRITE = 0x04,
> + CMDQ_CODE_JUMP = 0x10,
> + CMDQ_CODE_WFE = 0x20, /* wait for event (and clear) */
> + CMDQ_CODE_CLEAR_EVENT = 0x21, /* clear event */
> + CMDQ_CODE_EOC = 0x40, /* end of command */
> +};
> +
> +enum cmdq_task_state {
> + TASK_STATE_IDLE, /* free task */
> + TASK_STATE_BUSY, /* task running on a thread */
> + TASK_STATE_KILLED, /* task process being killed */
> + TASK_STATE_ERROR, /* task execution error */
> + TASK_STATE_DONE, /* task finished */
> + TASK_STATE_WAITING, /* allocated but waiting for available thread */
> +};
> +
> +struct cmdq_cmd_buf {
> + atomic_t used;
> + void *va;
> + dma_addr_t pa;
> +};
> +
> +struct cmdq_task_cb {
> + /* called by isr */
> + cmdq_async_flush_cb isr_cb;
> + void *isr_data;
> + /* called by releasing task */
> + cmdq_async_flush_cb done_cb;
> + void *done_data;
> +};
> +
> +struct cmdq_task {
> + struct cmdq *cqctx;
> + struct list_head list_entry;
> +
> + /* state for task life cycle */
> + enum cmdq_task_state task_state;
> + /* virtual address of command buffer */
> + u32 *va_base;
> + /* physical address of command buffer */
> + dma_addr_t mva_base;
> + /* size of allocated command buffer */
> + size_t buf_size;
> + /* It points to a cmdq_cmd_buf if this task use command buffer pool. */
> + struct cmdq_cmd_buf *cmd_buf;
> +
> + u64 engine_flag;
> + size_t command_size;
> + u32 num_cmd; /* 2 * number of commands */
> + int reorder;
> + /* HW thread ID; CMDQ_INVALID_THREAD if not running */
> + int thread;
> + /* flag of IRQ received */
> + int irq_flag;
> + /* callback functions */
> + struct cmdq_task_cb cb;
> + /* work item when auto release is used */
> + struct work_struct auto_release_work;
> +
> + ktime_t submit; /* submit time */
> +
> + pid_t caller_pid;
> + char caller_name[TASK_COMM_LEN];
> +};
> +
> +struct cmdq_thread {
> + u32 task_count;
> + u32 wait_cookie;
> + u32 next_cookie;
> + struct cmdq_task *cur_task[CMDQ_MAX_TASK_IN_THREAD];
> +};
> +
> +struct cmdq {
> + struct device *dev;
> +
> + void __iomem *base;
> + u32 irq;
> +
> + /*
> + * task information
> + * task_cache: struct cmdq_task object cache
> + * task_free_list: unused free tasks
> + * task_active_list: active tasks
> + * task_consume_wait_queue_item: task consumption work item
> + * task_auto_release_wq: auto-release workqueue
> + * task_consume_wq: task consumption workqueue (for queued tasks)
> + */
> + struct kmem_cache *task_cache;
> + struct list_head task_free_list;
> + struct list_head task_active_list;
> + struct list_head task_wait_list;
> + struct work_struct task_consume_wait_queue_item;
> + struct workqueue_struct *task_auto_release_wq;
> + struct workqueue_struct *task_consume_wq;
> +
> + struct cmdq_thread thread[CMDQ_MAX_THREAD_COUNT];
> +
> + /* mutex, spinlock, flag */
> + struct mutex task_mutex; /* for task list */
> + struct mutex clock_mutex; /* for clock operation */
> + spinlock_t thread_lock; /* for cmdq hardware thread */
> + int thread_usage;
> + spinlock_t exec_lock; /* for exec task */
> +
> + /* command buffer pool */
> + struct cmdq_cmd_buf cmd_buf_pool[CMDQ_CMD_BUF_POOL_BUF_NUM];
> +
> + /*
> + * notification
> + * wait_queue: for task done
> + * thread_dispatch_queue: for thread acquiring
> + */
> + wait_queue_head_t wait_queue[CMDQ_MAX_THREAD_COUNT];
> + wait_queue_head_t thread_dispatch_queue;
> +
> + /* ccf */
> + struct clk *clock;
> +};
> +
> +struct cmdq_event_item {
> + enum cmdq_event event;
> + const char *name;
> +};
> +
> +struct cmdq_subsys {
> + u32 base_addr;
> + int id;
> + const char *name;
> +};
> +
> +static const struct cmdq_event_item cmdq_events[] = {
> + /* Display start of frame(SOF) events */
> + {CMDQ_EVENT_DISP_OVL0_SOF, "CMDQ_EVENT_DISP_OVL0_SOF"},
> + {CMDQ_EVENT_DISP_OVL1_SOF, "CMDQ_EVENT_DISP_OVL1_SOF"},
> + {CMDQ_EVENT_DISP_RDMA0_SOF, "CMDQ_EVENT_DISP_RDMA0_SOF"},
> + {CMDQ_EVENT_DISP_RDMA1_SOF, "CMDQ_EVENT_DISP_RDMA1_SOF"},
> + {CMDQ_EVENT_DISP_RDMA2_SOF, "CMDQ_EVENT_DISP_RDMA2_SOF"},
> + {CMDQ_EVENT_DISP_WDMA0_SOF, "CMDQ_EVENT_DISP_WDMA0_SOF"},
> + {CMDQ_EVENT_DISP_WDMA1_SOF, "CMDQ_EVENT_DISP_WDMA1_SOF"},
> + /* Display end of frame(EOF) events */
> + {CMDQ_EVENT_DISP_OVL0_EOF, "CMDQ_EVENT_DISP_OVL0_EOF"},
> + {CMDQ_EVENT_DISP_OVL1_EOF, "CMDQ_EVENT_DISP_OVL1_EOF"},
> + {CMDQ_EVENT_DISP_RDMA0_EOF, "CMDQ_EVENT_DISP_RDMA0_EOF"},
> + {CMDQ_EVENT_DISP_RDMA1_EOF, "CMDQ_EVENT_DISP_RDMA1_EOF"},
> + {CMDQ_EVENT_DISP_RDMA2_EOF, "CMDQ_EVENT_DISP_RDMA2_EOF"},
> + {CMDQ_EVENT_DISP_WDMA0_EOF, "CMDQ_EVENT_DISP_WDMA0_EOF"},
> + {CMDQ_EVENT_DISP_WDMA1_EOF, "CMDQ_EVENT_DISP_WDMA1_EOF"},
> + /* Mutex end of frame(EOF) events */
> + {CMDQ_EVENT_MUTEX0_STREAM_EOF, "CMDQ_EVENT_MUTEX0_STREAM_EOF"},
> + {CMDQ_EVENT_MUTEX1_STREAM_EOF, "CMDQ_EVENT_MUTEX1_STREAM_EOF"},
> + {CMDQ_EVENT_MUTEX2_STREAM_EOF, "CMDQ_EVENT_MUTEX2_STREAM_EOF"},
> + {CMDQ_EVENT_MUTEX3_STREAM_EOF, "CMDQ_EVENT_MUTEX3_STREAM_EOF"},
> + {CMDQ_EVENT_MUTEX4_STREAM_EOF, "CMDQ_EVENT_MUTEX4_STREAM_EOF"},
> + /* Display underrun events */
> + {CMDQ_EVENT_DISP_RDMA0_UNDERRUN, "CMDQ_EVENT_DISP_RDMA0_UNDERRUN"},
> + {CMDQ_EVENT_DISP_RDMA1_UNDERRUN, "CMDQ_EVENT_DISP_RDMA1_UNDERRUN"},
> + {CMDQ_EVENT_DISP_RDMA2_UNDERRUN, "CMDQ_EVENT_DISP_RDMA2_UNDERRUN"},
> + /* Keep this at the end of HW events */
> + {CMDQ_MAX_HW_EVENT_COUNT, "CMDQ_MAX_HW_EVENT_COUNT"},
> + /* This is max event and also can be used as mask. */
> + {CMDQ_SYNC_TOKEN_MAX, "CMDQ_SYNC_TOKEN_MAX"},
> + /* Invalid event */
> + {CMDQ_SYNC_TOKEN_INVALID, "CMDQ_SYNC_TOKEN_INVALID"},
> +};
> +
> +static const struct cmdq_subsys g_subsys[] = {
> + {0x1400, 1, "MMSYS"},
> + {0x1401, 2, "DISP"},
> + {0x1402, 3, "DISP"},
> +};
> +
> +static const char *cmdq_event_get_name(enum cmdq_event event)
> +{
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(cmdq_events); i++)
> + if (cmdq_events[i].event == event)
> + return cmdq_events[i].name;
> +
> + return "CMDQ_EVENT_UNKNOWN";
> +}
> +
> +static void cmdq_event_reset(struct cmdq *cqctx)
> +{
> + int i;
> +
> + /* set all defined HW events to 0 */
> + for (i = 0; i < ARRAY_SIZE(cmdq_events); i++) {
> + if (cmdq_events[i].event >= CMDQ_MAX_HW_EVENT_COUNT)
> + break;
> + writel(cmdq_events[i].event,
> + cqctx->base + CMDQ_SYNC_TOKEN_UPD_OFFSET);
> + }
> +}
> +
> +static int cmdq_subsys_base_addr_to_id(u32 base_addr)
> +{
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(g_subsys); i++) {
> + if (g_subsys[i].base_addr == base_addr)
> + return g_subsys[i].id;
> + }
> +
> + return -EFAULT;
> +}
> +
> +static u32 cmdq_subsys_id_to_base_addr(int id)
> +{
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(g_subsys); i++) {
> + if (g_subsys[i].id == id)
> + return g_subsys[i].base_addr;
> + }
> +
> + return 0;
> +}
> +
> +static const char *cmdq_subsys_base_addr_to_name(u32 base_addr)
> +{
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(g_subsys); i++)
> + if (g_subsys[i].base_addr == base_addr)
> + return g_subsys[i].name;
> +
> + return NULL;
> +}
> +
> +static int cmdq_eng_get_thread(u64 flag)
> +{
> + if (flag & BIT_ULL(CMDQ_ENG_DISP_DSI0))
> + return CMDQ_THR_DISP_DSI0;
> + else /* CMDQ_ENG_DISP_DPI0 */
> + return CMDQ_THR_DISP_DPI0;
> +}
> +
> +static const char *cmdq_event_get_module(enum cmdq_event event)
> +{
> + const char *module;
> +
> + switch (event) {
> + case CMDQ_EVENT_DISP_RDMA0_SOF:
> + case CMDQ_EVENT_DISP_RDMA1_SOF:
> + case CMDQ_EVENT_DISP_RDMA2_SOF:
> + case CMDQ_EVENT_DISP_RDMA0_EOF:
> + case CMDQ_EVENT_DISP_RDMA1_EOF:
> + case CMDQ_EVENT_DISP_RDMA2_EOF:
> + case CMDQ_EVENT_DISP_RDMA0_UNDERRUN:
> + case CMDQ_EVENT_DISP_RDMA1_UNDERRUN:
> + case CMDQ_EVENT_DISP_RDMA2_UNDERRUN:
> + module = "DISP_RDMA";
> + break;
> + case CMDQ_EVENT_DISP_WDMA0_SOF:
> + case CMDQ_EVENT_DISP_WDMA1_SOF:
> + case CMDQ_EVENT_DISP_WDMA0_EOF:
> + case CMDQ_EVENT_DISP_WDMA1_EOF:
> + module = "DISP_WDMA";
> + break;
> + case CMDQ_EVENT_DISP_OVL0_SOF:
> + case CMDQ_EVENT_DISP_OVL1_SOF:
> + case CMDQ_EVENT_DISP_OVL0_EOF:
> + case CMDQ_EVENT_DISP_OVL1_EOF:
> + module = "DISP_OVL";
> + break;
> + case CMDQ_EVENT_MUTEX0_STREAM_EOF ... CMDQ_EVENT_MUTEX4_STREAM_EOF:
> + module = "DISP";
> + break;
> + default:
> + module = "CMDQ";
> + break;
> + }
> +
> + return module;
> +}
> +
> +static u32 cmdq_thread_get_cookie(struct cmdq *cqctx, int tid)
> +{
> + return readl(cqctx->base + CMDQ_THR_EXEC_CNT_OFFSET +
> + CMDQ_THR_SHIFT * tid) & CMDQ_COOKIE_MASK;
> +}
> +
> +static int cmdq_cmd_buf_pool_init(struct cmdq *cqctx)
> +{
> + struct device *dev = cqctx->dev;
> + int i;
> + int ret = 0;
> + struct cmdq_cmd_buf *buf;
> +
> + for (i = 0; i < ARRAY_SIZE(cqctx->cmd_buf_pool); i++) {
> + buf = &cqctx->cmd_buf_pool[i];
> + buf->va = dma_alloc_coherent(dev, CMDQ_CMD_BUF_POOL_BUF_SIZE,
> + &buf->pa, GFP_KERNEL);
> + if (!buf->va) {
> + dev_err(dev, "failed to alloc cmdq_cmd_buf\n");
> + ret = -ENOMEM;
> + goto fail_alloc;
> + }
> + }
> +
> + return 0;
> +
> +fail_alloc:
> + for (i -= 1; i >= 0 ; i--) {
> + buf = &cqctx->cmd_buf_pool[i];
> + dma_free_coherent(dev, CMDQ_CMD_BUF_POOL_BUF_SIZE, buf->va,
> + buf->pa);
> + }
> +
> + return ret;
> +}
> +
> +static void cmdq_cmd_buf_pool_uninit(struct cmdq *cqctx)
> +{
> + struct device *dev = cqctx->dev;
> + int i;
> + struct cmdq_cmd_buf *buf;
> +
> + for (i = 0; i < ARRAY_SIZE(cqctx->cmd_buf_pool); i++) {
> + buf = &cqctx->cmd_buf_pool[i];
> + dma_free_coherent(dev, CMDQ_CMD_BUF_POOL_BUF_SIZE, buf->va,
> + buf->pa);
> + if (atomic_read(&buf->used))
> + dev_err(dev,
> + "cmdq_cmd_buf[%d] va:0x%p still in use\n",
> + i, buf->va);
> + }
> +}
> +
> +static struct cmdq_cmd_buf *cmdq_cmd_buf_pool_get(struct cmdq *cqctx)
> +{
> + int i;
> + struct cmdq_cmd_buf *buf;
> +
> + for (i = 0; i < ARRAY_SIZE(cqctx->cmd_buf_pool); i++) {
> + buf = &cqctx->cmd_buf_pool[i];
> + if (!atomic_cmpxchg(&buf->used, 0, 1))
> + return buf;
> + }
> +
> + return NULL;
> +}
> +
> +static void cmdq_cmd_buf_pool_put(struct cmdq_cmd_buf *buf)
> +{
> + atomic_set(&buf->used, 0);
> +}
> +
> +static int cmdq_subsys_from_phys_addr(struct cmdq *cqctx, u32 cmdq_phys_addr)
> +{
> + u32 base_addr = cmdq_phys_addr >> 16;
> + int subsys = cmdq_subsys_base_addr_to_id(base_addr);
> +
> + if (subsys < 0)
> + dev_err(cqctx->dev,
> + "unknown subsys: error=%d, phys=0x%08x\n",
> + subsys, cmdq_phys_addr);
> +
> + return subsys;
> +}
> +
> +/*
> + * It's a kmemcache creator for cmdq_task to initialize variables
> + * without command buffer.
> + */
> +static void cmdq_task_ctor(void *param)
> +{
> + struct cmdq_task *task = param;
> +
> + memset(task, 0, sizeof(*task));
> + INIT_LIST_HEAD(&task->list_entry);
> + task->task_state = TASK_STATE_IDLE;
> + task->thread = CMDQ_INVALID_THREAD;
> +}
> +
> +static void cmdq_task_free_command_buffer(struct cmdq_task *task)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct device *dev = cqctx->dev;
> +
> + if (!task->va_base)
> + return;
> +
> + if (task->cmd_buf)
> + cmdq_cmd_buf_pool_put(task->cmd_buf);
> + else
> + dma_free_coherent(dev, task->buf_size, task->va_base,
> + task->mva_base);
> +
> + task->va_base = NULL;
> + task->mva_base = 0;
> + task->buf_size = 0;
> + task->command_size = 0;
> + task->num_cmd = 0;
> + task->cmd_buf = NULL;
> +}
> +
> +/*
> + * Ensure size of command buffer in the given cmdq_task.
> + * Existing buffer data will be copied to new buffer.
> + * This buffer is guaranteed to be physically continuous.
> + * returns -ENOMEM if cannot allocate new buffer
> + */
> +static int cmdq_task_realloc_command_buffer(struct cmdq_task *task, size_t size)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct device *dev = cqctx->dev;
> + void *new_buf = NULL;
> + dma_addr_t new_mva_base;
> + size_t cmd_size;
> + u32 num_cmd;
> + struct cmdq_cmd_buf *cmd_buf = NULL;
> +
> + if (task->va_base && task->buf_size >= size)
> + return 0;
> +
> + /* try command pool first */
> + if (size <= CMDQ_CMD_BUF_POOL_BUF_SIZE) {
> + cmd_buf = cmdq_cmd_buf_pool_get(cqctx);
> + if (cmd_buf) {
> + new_buf = cmd_buf->va;
> + new_mva_base = cmd_buf->pa;
> + memset(new_buf, 0, CMDQ_CMD_BUF_POOL_BUF_SIZE);
> + }
> + }
> +
> + if (!new_buf) {
> + new_buf = dma_alloc_coherent(dev, size, &new_mva_base,
> + GFP_KERNEL);
> + if (!new_buf) {
> + dev_err(dev, "realloc cmd buffer of size %zu failed\n",
> + size);
> + return -ENOMEM;
> + }
> + }
> +
> + /* copy and release old buffer */
> + if (task->va_base)
> + memcpy(new_buf, task->va_base, task->buf_size);
> +
> + /*
> + * we should keep track of num_cmd and cmd_size
> + * since they are cleared in free command buffer
> + */
> + num_cmd = task->num_cmd;
> + cmd_size = task->command_size;
> + cmdq_task_free_command_buffer(task);
> +
> + /* attach the new buffer */
> + task->va_base = new_buf;
> + task->mva_base = new_mva_base;
> + task->buf_size = cmd_buf ? CMDQ_CMD_BUF_POOL_BUF_SIZE : size;
> + task->num_cmd = num_cmd;
> + task->command_size = cmd_size;
> + task->cmd_buf = cmd_buf;
> +
> + return 0;
> +}
> +
> +/* allocate and initialize struct cmdq_task and its command buffer */
> +static struct cmdq_task *cmdq_task_create(struct cmdq *cqctx)
> +{
> + struct device *dev = cqctx->dev;
> + struct cmdq_task *task;
> + int status;
> +
> + task = kmem_cache_alloc(cqctx->task_cache, GFP_KERNEL);
> + task->cqctx = cqctx;
> + status = cmdq_task_realloc_command_buffer(
> + task, CMDQ_INITIAL_CMD_BLOCK_SIZE);
> + if (status < 0) {
> + dev_err(dev, "allocate command buffer failed\n");
> + kmem_cache_free(cqctx->task_cache, task);
> + return NULL;
> + }
> + return task;
> +}
> +
> +static int cmdq_dev_init(struct platform_device *pdev, struct cmdq *cqctx)
> +{
> + struct device *dev = &pdev->dev;
> + struct device_node *node = dev->of_node;
> + struct resource *res;
> +
> + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> + cqctx->base = devm_ioremap_resource(dev, res);
> + if (IS_ERR(cqctx->base)) {
> + dev_err(dev, "failed to ioremap gce\n");
> + return PTR_ERR(cqctx->base);
> + }
> +
> + cqctx->irq = irq_of_parse_and_map(node, 0);
> + if (!cqctx->irq) {
> + dev_err(dev, "failed to get irq\n");
> + return -EINVAL;
> + }
> +
> + dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
> + dev, cqctx->base, cqctx->irq);
> + return 0;
> +}
> +
> +static void cmdq_task_release_unlocked(struct cmdq_task *task)
> +{
> + struct cmdq *cqctx = task->cqctx;
> +
> + /* This func should be inside cqctx->task_mutex mutex */
> + lockdep_assert_held(&cqctx->task_mutex);
> +
> + task->task_state = TASK_STATE_IDLE;
> + task->thread = CMDQ_INVALID_THREAD;
> +
> + cmdq_task_free_command_buffer(task);
> +
> + /*
> + * move from active/waiting list to free list
> + * todo: shrink free list
> + */
> + list_move_tail(&task->list_entry, &cqctx->task_free_list);
> +}
> +
> +static void cmdq_task_release_internal(struct cmdq_task *task)
> +{
> + struct cmdq *cqctx = task->cqctx;
> +
> + mutex_lock(&cqctx->task_mutex);
> + cmdq_task_release_unlocked(task);
> + mutex_unlock(&cqctx->task_mutex);
> +}
> +
> +static struct cmdq_task *cmdq_core_find_free_task(struct cmdq *cqctx)
> +{
> + struct cmdq_task *task;
> +
> + mutex_lock(&cqctx->task_mutex);
> +
> + /*
> + * Pick from free list first;
> + * create one if there is no free entry.
> + */
> + if (list_empty(&cqctx->task_free_list)) {
> + task = cmdq_task_create(cqctx);
> + } else {
> + task = list_first_entry(&cqctx->task_free_list,
> + struct cmdq_task, list_entry);
> + /* remove from free list */
> + list_del_init(&task->list_entry);
> + }
> +
> + mutex_unlock(&cqctx->task_mutex);
> +
> + return task;
> +}
> +
> +/* After dropping error task, we have to reorder remaining valid tasks. */
> +static void cmdq_thread_reorder_task_array(struct cmdq_thread *thread,
> + int prev_id)
> +{
> + int i, j;
> + int next_id, search_id;
> + int reorder_count = 0;
> + struct cmdq_task *task;
> +
> + next_id = prev_id + 1;
> + for (i = 1; i < (CMDQ_MAX_TASK_IN_THREAD - 1); i++, next_id++) {
> + if (next_id >= CMDQ_MAX_TASK_IN_THREAD)
> + next_id = 0;
> +
> + if (thread->cur_task[next_id])
> + break;
> +
> + search_id = next_id + 1;
> + for (j = (i + 1); j < CMDQ_MAX_TASK_IN_THREAD;
> + j++, search_id++) {
> + if (search_id >= CMDQ_MAX_TASK_IN_THREAD)
> + search_id = 0;
> +
> + if (thread->cur_task[search_id]) {
> + thread->cur_task[next_id] =
> + thread->cur_task[search_id];
> + thread->cur_task[search_id] = NULL;
> + if ((j - i) > reorder_count)
> + reorder_count = j - i;
> +
> + break;
> + }
> + }
> +
> + task = thread->cur_task[next_id];
> + if ((task->va_base[task->num_cmd - 1] == CMDQ_JUMP_BY_OFFSET) &&
> + (task->va_base[task->num_cmd - 2] == CMDQ_JUMP_PASS)) {
> + /* We reached the last task */
> + break;
> + }
> + }
> +
> + thread->next_cookie -= reorder_count;
> +}
> +
> +static int cmdq_core_sync_command(struct cmdq_task *task,
> + struct cmdq_command *cmd_desc)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct device *dev = cqctx->dev;
> + int status;
> + size_t size;
> +
> + size = task->command_size + CMDQ_INST_SIZE;
> + status = cmdq_task_realloc_command_buffer(task, size);
> + if (status < 0) {
> + dev_err(dev, "failed to realloc command buffer\n");
> + dev_err(dev, "task=0x%p, request size=%zu\n", task, size);
> + return status;
> + }
> +
> + /* copy the commands to our DMA buffer */
> + memcpy(task->va_base, cmd_desc->va_base, cmd_desc->block_size);
> +
> + /* re-adjust num_cmd according to command_size */
> + task->num_cmd = task->command_size / sizeof(task->va_base[0]);
> +
> + return 0;
> +}
> +
> +static struct cmdq_task *cmdq_core_acquire_task(struct cmdq_command *cmd_desc,
> + struct cmdq_task_cb *cb)
> +{
> + struct cmdq *cqctx = cmd_desc->cqctx;
> + struct device *dev = cqctx->dev;
> + struct cmdq_task *task;
> +
> + task = cmdq_core_find_free_task(cqctx);
> + if (!task) {
> + dev_err(dev, "can't acquire task info\n");
> + return NULL;
> + }
> +
> + /* initialize field values */
> + task->engine_flag = cmd_desc->engine_flag;
> + task->task_state = TASK_STATE_WAITING;
> + task->reorder = 0;
> + task->thread = CMDQ_INVALID_THREAD;
> + task->irq_flag = 0x0;
> + if (cb)
> + task->cb = *cb;
> + else
> + memset(&task->cb, 0, sizeof(task->cb));
> + task->command_size = cmd_desc->block_size;
> +
> + /* store caller info for debug */
> + if (current) {
> + task->caller_pid = current->pid;
> + memcpy(task->caller_name, current->comm, sizeof(current->comm));
> + }
> +
> + if (cmdq_core_sync_command(task, cmd_desc) < 0) {
> + dev_err(dev, "fail to sync command\n");
> + cmdq_task_release_internal(task);
> + return NULL;
> + }
> +
> + /* insert into waiting list to process */
> + if (task) {
> + task->submit = ktime_get();
> + mutex_lock(&cqctx->task_mutex);
> + list_add_tail(&task->list_entry, &cqctx->task_wait_list);
> + mutex_unlock(&cqctx->task_mutex);
> + }
> +
> + return task;
> +}
> +
> +static int cmdq_clk_enable(struct cmdq *cqctx)
> +{
> + struct device *dev = cqctx->dev;
> + int ret = 0;
> +
> + if (cqctx->thread_usage == 0) {
> + ret = clk_prepare_enable(cqctx->clock);
> + if (ret) {
> + dev_err(dev, "prepare and enable clk:%s fail\n",
> + CMDQ_CLK_NAME);
> + return ret;
> + }
> + cmdq_event_reset(cqctx);
> + }
> + cqctx->thread_usage++;
> +
> + return ret;
> +}
> +
> +static void cmdq_clk_disable(struct cmdq *cqctx)
> +{
> + cqctx->thread_usage--;
> + if (cqctx->thread_usage <= 0)
> + clk_disable_unprepare(cqctx->clock);
> +}
> +
> +static int cmdq_core_find_free_thread(struct cmdq *cqctx, int tid)
> +{
> + struct cmdq_thread *thread = cqctx->thread;
> + u32 next_cookie;
> +
> + /*
> + * make sure the found thread has enough space for the task;
> + * cmdq_thread->cur_task has size limitation.
> + */
> + if (thread[tid].task_count >= CMDQ_MAX_TASK_IN_THREAD) {
> + dev_warn(cqctx->dev, "thread(%d) task count = %d\n",
> + tid, thread[tid].task_count);
> + return CMDQ_INVALID_THREAD;
> + }
> +
> + next_cookie = thread[tid].next_cookie % CMDQ_MAX_TASK_IN_THREAD;
> + if (thread[tid].cur_task[next_cookie]) {
> + dev_warn(cqctx->dev, "thread(%d) next cookie = %d\n",
> + tid, next_cookie);
> + return CMDQ_INVALID_THREAD;
> + }
> +
> + return tid;
> +}
> +
> +static struct cmdq_thread *cmdq_core_acquire_thread(struct cmdq *cqctx,
> + int candidate_tid)
> +{
> + int tid;
> +
> + tid = cmdq_core_find_free_thread(cqctx, candidate_tid);
> + if (tid != CMDQ_INVALID_THREAD) {
> + mutex_lock(&cqctx->clock_mutex);
> + cmdq_clk_enable(cqctx);
> + mutex_unlock(&cqctx->clock_mutex);
> + return &cqctx->thread[tid];
> + }
> + return NULL;
> +}
> +
> +static void cmdq_core_release_thread(struct cmdq *cqctx, int tid)
> +{
> + if (WARN_ON(tid == CMDQ_INVALID_THREAD))
> + return;
> +
> + mutex_lock(&cqctx->clock_mutex);
> + cmdq_clk_disable(cqctx);
> + mutex_unlock(&cqctx->clock_mutex);
> +}
> +
> +static void cmdq_task_remove_thread(struct cmdq_task *task)
> +{
> + int tid = task->thread;
> +
> + task->thread = CMDQ_INVALID_THREAD;
> + cmdq_core_release_thread(task->cqctx, tid);
> +}
> +
> +static int cmdq_thread_suspend(struct cmdq *cqctx, int tid)
> +{
> + struct device *dev = cqctx->dev;
> + void __iomem *gce_base = cqctx->base;
> + u32 enabled;
> + u32 status;
> +
> + /* write suspend bit */
> + writel(CMDQ_THR_SUSPEND,
> + gce_base + CMDQ_THR_SUSPEND_TASK_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + /* If already disabled, treat as suspended successful. */
> + enabled = readl(gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + if (!(enabled & CMDQ_THR_ENABLED))
> + return 0;
> +
> + /* poll suspended status */
> + if (readl_poll_timeout_atomic(gce_base +
> + CMDQ_THR_CURR_STATUS_OFFSET +
> + CMDQ_THR_SHIFT * tid,
> + status,
> + status & CMDQ_THR_STATUS_SUSPENDED,
> + 0, 10)) {
> + dev_err(dev, "Suspend HW thread %d failed\n", tid);
> + return -EFAULT;
> + }
> +
> + return 0;
> +}
> +
> +static void cmdq_thread_resume(struct cmdq *cqctx, int tid)
> +{
> + void __iomem *gce_base = cqctx->base;
> +
> + writel(CMDQ_THR_RESUME,
> + gce_base + CMDQ_THR_SUSPEND_TASK_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +}
> +
> +static int cmdq_thread_reset(struct cmdq *cqctx, int tid)
> +{
> + struct device *dev = cqctx->dev;
> + void __iomem *gce_base = cqctx->base;
> + u32 warm_reset;
> +
> + writel(CMDQ_THR_WARM_RESET,
> + gce_base + CMDQ_THR_WARM_RESET_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + if (readl_poll_timeout_atomic(gce_base + CMDQ_THR_WARM_RESET_OFFSET +
> + CMDQ_THR_SHIFT * tid,
> + warm_reset,
> + !(warm_reset & CMDQ_THR_WARM_RESET),
> + 0, 10)) {
> + dev_err(dev, "Reset HW thread %d failed\n", tid);
> + return -EFAULT;
> + }
> +
> + writel(CMDQ_THR_SLOT_CYCLES, gce_base + CMDQ_THR_SLOT_CYCLES_OFFSET);
> + return 0;
> +}
> +
> +static int cmdq_thread_disable(struct cmdq *cqctx, int tid)
> +{
> + void __iomem *gce_base = cqctx->base;
> +
> + cmdq_thread_reset(cqctx, tid);
> + writel(CMDQ_THR_DISABLED,
> + gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + return 0;
> +}
> +
> +static u32 *cmdq_task_get_pc_and_inst(const struct cmdq_task *task, int tid,
> + u32 insts[2])
> +{
> + struct cmdq *cqctx;
> + void __iomem *gce_base;
> + unsigned long pc_pa;
> + u8 *pc_va;
> + u8 *cmd_end;
> +
> + memset(insts, 0, sizeof(u32) * 2);
> +
> + if (!task ||
> + !task->va_base ||
> + tid == CMDQ_INVALID_THREAD) {
> + pr_err("cmdq get pc failed since invalid param, task 0x%p, task->va_base:0x%p, thread:%d\n",
> + task, task->va_base, tid);
> + return NULL;
> + }
> +
> + cqctx = task->cqctx;
> + gce_base = cqctx->base;
> +
> + pc_pa = (unsigned long)readl(gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + pc_va = (u8 *)task->va_base + (pc_pa - task->mva_base);
> + cmd_end = (u8 *)(task->va_base + task->num_cmd - 1);
> +
> + if (((u8 *)task->va_base <= pc_va) && (pc_va <= cmd_end)) {
> + if (pc_va < cmd_end) {
> + /* get arg_a and arg_b */
> + insts[0] = readl(pc_va);
> + insts[1] = readl(pc_va + 4);
> + } else {
> + /* get arg_a and arg_b of previous cmd */
> + insts[0] = readl(pc_va - 8);
> + insts[1] = readl(pc_va - 4);
> + }
> + } else {
> + return NULL;
> + }
> +
> + return (u32 *)pc_va;
> +}
> +
> +static const char *cmdq_core_parse_module_from_subsys(u32 arg_a)
> +{
> + int id = (arg_a & CMDQ_ARG_A_SUBSYS_MASK) >> CMDQ_SUBSYS_SHIFT;
> + u32 base_addr = cmdq_subsys_id_to_base_addr(id);
> + const char *module = cmdq_subsys_base_addr_to_name(base_addr);
> +
> + return module ? module : "CMDQ";
> +}
> +
> +static const char *cmdq_core_parse_op(u32 op_code)
> +{
> + switch (op_code) {
> + case CMDQ_CODE_WRITE:
> + return "WRIT";
> + case CMDQ_CODE_WFE:
> + return "SYNC";
> + case CMDQ_CODE_MOVE:
> + return "MASK";
> + case CMDQ_CODE_JUMP:
> + return "JUMP";
> + case CMDQ_CODE_EOC:
> + return "MARK";
> + }
> + return NULL;
> +}
> +
> +static void cmdq_core_parse_error(struct cmdq_task *task, int tid,
> + const char **module_name, int *flag,
> + u32 *inst_a, u32 *inst_b)
> +{
> + int irq_flag = task->irq_flag;
> + u32 insts[2] = { 0 };
> + const char *module;
> +
> + /*
> + * other cases, use instruction to judge
> + * because engine flag are not sufficient
> + */
> + if (cmdq_task_get_pc_and_inst(task, tid, insts)) {
> + u32 op, arg_a, arg_b;
> +
> + op = insts[1] >> CMDQ_OP_CODE_SHIFT;
> + arg_a = insts[1] & CMDQ_ARG_A_MASK;
> + arg_b = insts[0];
> +
> + switch (op) {
> + case CMDQ_CODE_WRITE:
> + module = cmdq_core_parse_module_from_subsys(arg_a);
> + break;
> + case CMDQ_CODE_WFE:
> + /* arg_a is the event id */
> + module = cmdq_event_get_module((enum cmdq_event)arg_a);
> + break;
> + case CMDQ_CODE_MOVE:
> + case CMDQ_CODE_JUMP:
> + case CMDQ_CODE_EOC:
> + default:
> + module = "CMDQ";
> + break;
> + }
> + } else {
> + module = "CMDQ";
> + }
> +
> + /* fill output parameter */
> + *module_name = module;
> + *flag = irq_flag;
> + *inst_a = insts[1];
> + *inst_b = insts[0];
> +}
> +
> +static void cmdq_thread_insert_task_by_cookie(struct cmdq_thread *thread,
> + struct cmdq_task *task,
> + int cookie)
> +{
> + thread->wait_cookie = cookie;
> + thread->next_cookie = cookie + 1;
> + if (thread->next_cookie > CMDQ_MAX_COOKIE_VALUE)
> + thread->next_cookie = 0;
> +
> + /* first task, so set to 1 */
> + thread->task_count = 1;
> +
> + thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD] = task;
> +}
> +
> +static int cmdq_thread_remove_task_by_index(struct cmdq_thread *thread,
> + int index,
> + enum cmdq_task_state new_state)
> +{
> + struct cmdq_task *task;
> + struct device *dev;
> +
> + task = thread->cur_task[index];
> + if (!task) {
> + pr_err("%s: remove fail, task:%d on thread:0x%p is NULL\n",
> + __func__, index, thread);
> + return -EINVAL;
> + }
> + dev = task->cqctx->dev;
> +
> + /*
> + * Note: the timing of switching a task to a done state (_ERROR,
> + * _KILLED, _DONE) is aligned with the thread's task_count change;
> + * check the task state to prevent decrementing the thread's
> + * task_count twice.
> + */
> + if (task->task_state != TASK_STATE_BUSY) {
> + dev_err(dev, "remove task failed\n");
> + dev_err(dev, "state:%d. thread:0x%p, task:%d, new_state:%d\n",
> + task->task_state, thread, index, new_state);
> + return -EINVAL;
> + }
> +
> + if (thread->task_count == 0) {
> + dev_err(dev, "no task to remove\n");
> + dev_err(dev, "thread:%d, index:%d\n", task->thread, index);
> + return -EINVAL;
> + }
> +
> + task->task_state = new_state;
> + thread->cur_task[index] = NULL;
> + thread->task_count--;
> +
> + return 0;
> +}
> +
> +static int cmdq_thread_force_remove_task(struct cmdq_task *task, int tid)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct cmdq_thread *thread = &cqctx->thread[tid];
> + void __iomem *gce_base = cqctx->base;
> + int status;
> + int cookie;
> + struct cmdq_task *exec_task;
> +
> + status = cmdq_thread_suspend(cqctx, tid);
> +
> + writel(CMDQ_THR_NO_TIMEOUT,
> + gce_base + CMDQ_THR_INST_CYCLES_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + /* The cookie of the task currently being processed */
> + cookie = cmdq_thread_get_cookie(cqctx, tid) + 1;
> +
> + exec_task = thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD];
> + if (exec_task && exec_task == task) {
> + dma_addr_t eoc_pa = task->mva_base + task->command_size - 16;
> +
> + /* The task is executing now; set the PC to its EOC to bypass it */
> + writel(eoc_pa,
> + gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD] = NULL;
> + task->task_state = TASK_STATE_KILLED;
> + } else {
> + int i, j;
> +
> + j = thread->task_count;
> + for (i = cookie; j > 0; j--, i++) {
> + i %= CMDQ_MAX_TASK_IN_THREAD;
> +
> + exec_task = thread->cur_task[i];
> + if (!exec_task)
> + continue;
> +
> + if ((exec_task->va_base[exec_task->num_cmd - 1] ==
> + CMDQ_JUMP_BY_OFFSET) &&
> + (exec_task->va_base[exec_task->num_cmd - 2] ==
> + CMDQ_JUMP_PASS)) {
> + /* reached the last task */
> + break;
> + }
> +
> + if (exec_task->va_base[exec_task->num_cmd - 2] ==
> + task->mva_base) {
> + /* fake EOC command */
> + exec_task->va_base[exec_task->num_cmd - 2] =
> + CMDQ_EOC_IRQ_EN;
> + exec_task->va_base[exec_task->num_cmd - 1] =
> + CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT;
> +
> + /* bypass the task */
> + exec_task->va_base[exec_task->num_cmd] =
> + task->va_base[task->num_cmd - 2];
> + exec_task->va_base[exec_task->num_cmd + 1] =
> + task->va_base[task->num_cmd - 1];
> +
> + i = (i + 1) % CMDQ_MAX_TASK_IN_THREAD;
> +
> + thread->cur_task[i] = NULL;
> + task->task_state = TASK_STATE_KILLED;
> + status = 0;
> + break;
> + }
> + }
> + }
> +
> + return status;
> +}
> +
> +static struct cmdq_task *cmdq_thread_search_task_by_pc(
> + const struct cmdq_thread *thread, u32 pc)
> +{
> + struct cmdq_task *task;
> + int i;
> +
> + for (i = 0; i < CMDQ_MAX_TASK_IN_THREAD; i++) {
> + task = thread->cur_task[i];
> + if (task &&
> + pc >= task->mva_base &&
> + pc <= task->mva_base + task->command_size)
> + return task;
> + }
> +
> + /* return NULL if no task contains the PC */
> + return NULL;
> +}
> +
> +/*
> + * Re-fetch the thread's command buffer.
> + * Use case:
> + * If SW modifies the command buffer content after the commands have been
> + * configured to GCE, SW should notify GCE to re-fetch the commands in
> + * order to prevent inconsistent command buffer content between DRAM and
> + * GCE's SRAM.
> + */
> +static void cmdq_core_invalidate_hw_fetched_buffer(struct cmdq *cqctx,
> + int tid)
> +{
> + void __iomem *pc_va;
> + u32 pc;
> +
> + /*
> + * Writing the HW thread PC back causes GCE (the CMDQ HW) to give up
> + * the fetched command buffer and fetch the commands from DRAM into
> + * its SRAM again.
> + */
> + pc_va = cqctx->base + CMDQ_THR_CURR_ADDR_OFFSET + CMDQ_THR_SHIFT * tid;
> + pc = readl(pc_va);
> + writel(pc, pc_va);
> +}
> +
> +static int cmdq_task_insert_into_thread(struct cmdq_task *task,
> + int tid, int loop)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct device *dev = cqctx->dev;
> + struct cmdq_thread *thread = &cqctx->thread[tid];
> + struct cmdq_task *prev_task;
> + int index, prev;
> +
> + /* find previous task and then link this task behind it */
> +
> + index = thread->next_cookie % CMDQ_MAX_TASK_IN_THREAD;
> + prev = (index + CMDQ_MAX_TASK_IN_THREAD - 1) % CMDQ_MAX_TASK_IN_THREAD;
> +
> + prev_task = thread->cur_task[prev];
> +
> + /* the job may have been killed; search for a new one */
> + for (; !prev_task && loop > 1; loop--) {
> + dev_err(dev,
> + "prev_task is NULL, prev:%d, loop:%d, index:%d\n",
> + prev, loop, index);
> +
> + prev--;
> + if (prev < 0)
> + prev = CMDQ_MAX_TASK_IN_THREAD - 1;
> +
> + prev_task = thread->cur_task[prev];
> + }
> +
> + if (!prev_task) {
> + dev_err(dev,
> + "invalid prev_task index:%d, loop:%d\n",
> + index, loop);
> + return -EFAULT;
> + }
> +
> + /* insert this task */
> + thread->cur_task[index] = task;
> + /* let previous task jump to this new task */
> + prev_task->va_base[prev_task->num_cmd - 1] = CMDQ_JUMP_BY_PA;
> + prev_task->va_base[prev_task->num_cmd - 2] = task->mva_base;
> +
> + /* ask HW to re-fetch the command buffer */
> + cmdq_core_invalidate_hw_fetched_buffer(cqctx, tid);
> +
> + return 0;
> +}
> +
> +static bool cmdq_command_is_wfe(u32 *cmd)
> +{
> + u32 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
> + u32 wfe_op = CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT;
> +
> + return (cmd[0] == wfe_option && (cmd[1] & CMDQ_OP_CODE_MASK) == wfe_op);
> +}
> +
> +/* we assume tasks in the same display thread wait for the same event. */
> +static void cmdq_task_remove_wfe(struct cmdq_task *task)
> +{
> + u32 *base = task->va_base;
> + int i;
> +
> + /*
> + * Find all WFE commands in the task command buffer
> + * and replace them with JUMP_PASS.
> + */
> + for (i = 0; i < task->num_cmd; i += 2) {
> + if (cmdq_command_is_wfe(&base[i])) {
> + base[i] = CMDQ_JUMP_PASS;
> + base[i + 1] = CMDQ_JUMP_BY_OFFSET;
> + }
> + }
> +}
> +
> +static bool cmdq_thread_is_in_wfe(struct cmdq *cqctx, int tid)
> +{
> + return readl(cqctx->base + CMDQ_THR_WAIT_TOKEN_OFFSET +
> + CMDQ_THR_SHIFT * tid) & CMDQ_THR_IS_WAITING;
> +}
> +
> +static void cmdq_thread_wait_end(struct cmdq *cqctx, int tid,
> + unsigned long end_pa)
> +{
> + void __iomem *gce_base = cqctx->base;
> + unsigned long curr_pa;
> +
> + if (readl_poll_timeout_atomic(
> + gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid,
> + curr_pa, curr_pa == end_pa, 1, 20)) {
> + dev_err(cqctx->dev, "GCE thread(%d) cannot run to end.\n", tid);
> + }
> +}
> +
> +static int cmdq_task_exec_async_impl(struct cmdq_task *task, int tid)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct device *dev = cqctx->dev;
> + void __iomem *gce_base = cqctx->base;
> + int status;
> + struct cmdq_thread *thread;
> + unsigned long flags;
> + int loop;
> + int minimum;
> + int cookie;
> +
> + status = 0;
> + thread = &cqctx->thread[tid];
> +
> + spin_lock_irqsave(&cqctx->exec_lock, flags);
> +
> + /* update task's thread info */
> + task->thread = tid;
> + task->irq_flag = 0;
> + task->task_state = TASK_STATE_BUSY;
> +
> + /* case 1. first task for this thread */
> + if (thread->task_count <= 0) {
> + if (cmdq_thread_reset(cqctx, tid) < 0) {
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> + return -EFAULT;
> + }
> +
> + writel(CMDQ_THR_NO_TIMEOUT,
> + gce_base + CMDQ_THR_INST_CYCLES_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + writel(task->mva_base,
> + gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + writel(task->mva_base + task->command_size,
> + gce_base + CMDQ_THR_END_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + writel(CMDQ_THR_PRIORITY,
> + gce_base + CMDQ_THR_CFG_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + writel(CMDQ_THR_IRQ_EN,
> + gce_base + CMDQ_THR_IRQ_ENABLE_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + minimum = cmdq_thread_get_cookie(cqctx, tid);
> + cmdq_thread_insert_task_by_cookie(
> + thread, task, (minimum + 1));
> +
> + /* enable HW thread */
> + writel(CMDQ_THR_ENABLED,
> + gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + } else {
> + unsigned long curr_pa, end_pa;
> +
> + status = cmdq_thread_suspend(cqctx, tid);
> + if (status < 0) {
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> + return status;
> + }
> +
> + writel(CMDQ_THR_NO_TIMEOUT,
> + gce_base + CMDQ_THR_INST_CYCLES_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + cookie = thread->next_cookie;
> +
> + curr_pa = (unsigned long)readl(gce_base +
> + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + end_pa = (unsigned long)readl(gce_base +
> + CMDQ_THR_END_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + /*
> + * case 2. If already exited WFE, wait for current task to end
> + * and then jump directly to new task.
> + */
> + if (!cmdq_thread_is_in_wfe(cqctx, tid)) {
> + cmdq_thread_resume(cqctx, tid);
> + cmdq_thread_wait_end(cqctx, tid, end_pa);
> + status = cmdq_thread_suspend(cqctx, tid);
> + if (status < 0) {
> + spin_unlock_irqrestore(&cqctx->exec_lock,
> + flags);
> + return status;
> + }
> + /* set to task directly */
> + writel(task->mva_base,
> + gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + writel(task->mva_base + task->command_size,
> + gce_base + CMDQ_THR_END_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD] = task;
> + thread->task_count++;
> +
> + /*
> + * case 3. If thread is still in WFE from previous task, clear
> + * WFE in new task and append to thread.
> + */
> + } else {
> + /* Current task that should be processed */
> + minimum = cmdq_thread_get_cookie(cqctx, tid) + 1;
> + if (minimum > CMDQ_MAX_COOKIE_VALUE)
> + minimum = 0;
> +
> + /* Calculate loop count to adjust the tasks' order */
> + if (minimum <= cookie)
> + loop = cookie - minimum;
> + else
> + /* Counter wrapped */
> + loop = (CMDQ_MAX_COOKIE_VALUE - minimum + 1) +
> + cookie;
> +
> + if (loop < 0) {
> + dev_err(dev, "reorder fail:\n");
> + dev_err(dev, " task count=%d\n", loop);
> + dev_err(dev, " thread=%d\n", tid);
> + dev_err(dev, " next cookie=%d\n",
> + thread->next_cookie);
> + dev_err(dev, " (HW) next cookie=%d\n",
> + minimum);
> + dev_err(dev, " task=0x%p\n", task);
> +
> + spin_unlock_irqrestore(&cqctx->exec_lock,
> + flags);
> + return -EFAULT;
> + }
> +
> + if (loop > CMDQ_MAX_TASK_IN_THREAD)
> + loop %= CMDQ_MAX_TASK_IN_THREAD;
> +
> + status = cmdq_task_insert_into_thread(task, tid, loop);
> + if (status < 0) {
> + spin_unlock_irqrestore(
> + &cqctx->exec_lock, flags);
> + dev_err(dev,
> + "invalid task state for reorder.\n");
> + return status;
> + }
> +
> + cmdq_task_remove_wfe(task);
> +
> + smp_mb(); /* modify jump before enable thread */
> +
> + writel(task->mva_base + task->command_size,
> + gce_base + CMDQ_THR_END_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + thread->task_count++;
> + }
> +
> + thread->next_cookie += 1;
> + if (thread->next_cookie > CMDQ_MAX_COOKIE_VALUE)
> + thread->next_cookie = 0;
> +
> + /* resume HW thread */
> + cmdq_thread_resume(cqctx, tid);
> + }
> +
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> +
> + return status;
> +}
> +
> +static void cmdq_core_handle_error(struct cmdq *cqctx, int tid, int value)
> +{
> + struct device *dev = cqctx->dev;
> + void __iomem *gce_base = cqctx->base;
> + struct cmdq_thread *thread;
> + struct cmdq_task *task;
> + int cookie;
> + int count;
> + int inner;
> + int status;
> + u32 curr_pa, end_pa;
> +
> + curr_pa = readl(gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + end_pa = readl(gce_base + CMDQ_THR_END_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + dev_err(dev, "IRQ: error thread=%d, irq_flag=0x%x\n", tid, value);
> + dev_err(dev, "IRQ: Thread PC: 0x%08x, End PC:0x%08x\n",
> + curr_pa, end_pa);
> +
> + thread = &cqctx->thread[tid];
> +
> + cookie = cmdq_thread_get_cookie(cqctx, tid);
> +
> + /*
> + * We assume the error happened BEFORE EOC, because this interrupt
> + * would not be an error if it had been issued by EOC;
> + * so increment by 1 to locate the "current" task.
> + */
> + cookie++;
> +
> + /* set the issued task to error state */
> + if (thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD]) {
> + task = thread->cur_task[cookie % CMDQ_MAX_TASK_IN_THREAD];
> + task->irq_flag = value;
> + cmdq_thread_remove_task_by_index(
> + thread, cookie % CMDQ_MAX_TASK_IN_THREAD,
> + TASK_STATE_ERROR);
> + } else {
> + dev_err(dev,
> + "IRQ: can not find task in %s, pc:0x%08x, end_pc:0x%08x\n",
> + __func__, curr_pa, end_pa);
> + if (thread->task_count <= 0) {
> + /*
> + * suspend the HW thread first so that we work in a
> + * consistent state; the outer function should hold
> + * the spinlock cqctx->exec_lock
> + */
> + status = cmdq_thread_suspend(cqctx, tid);
> + if (status < 0)
> + dev_err(dev, "IRQ: suspend HW thread failed!");
> +
> + cmdq_thread_disable(cqctx, tid);
> + dev_err(dev,
> + "IRQ: there is no task for thread (%d) %s\n",
> + tid, __func__);
> + }
> + }
> +
> + /* set the remaining tasks to done state */
> + if (thread->wait_cookie <= cookie) {
> + count = cookie - thread->wait_cookie + 1;
> + } else if ((cookie + 1) % CMDQ_MAX_COOKIE_VALUE ==
> + thread->wait_cookie) {
> + count = 0;
> + } else {
> + /* counter wrapped */
> + count = (CMDQ_MAX_COOKIE_VALUE - thread->wait_cookie + 1) +
> + (cookie + 1);
> + dev_err(dev,
> + "IRQ: counter wrapped: wait cookie:%d, hw cookie:%d, count=%d",
> + thread->wait_cookie, cookie, count);
> + }
> +
> + for (inner = (thread->wait_cookie % CMDQ_MAX_TASK_IN_THREAD); count > 0;
> + count--, inner++) {
> + if (inner >= CMDQ_MAX_TASK_IN_THREAD)
> + inner = 0;
> +
> + if (thread->cur_task[inner]) {
> + task = thread->cur_task[inner];
> + task->irq_flag = 0; /* irq flag is unknown */
> + /* still call isr_cb to prevent a lock-up */
> + if (task->cb.isr_cb)
> + task->cb.isr_cb(task->cb.isr_data);
> + cmdq_thread_remove_task_by_index(
> + thread, inner, TASK_STATE_DONE);
> + }
> + }
> +
> + thread->wait_cookie = cookie + 1;
> + if (thread->wait_cookie > CMDQ_MAX_COOKIE_VALUE)
> + thread->wait_cookie -= (CMDQ_MAX_COOKIE_VALUE + 1);
> + /* min cookie value is 0 */
> +
> + wake_up(&cqctx->wait_queue[tid]);
> +}
> +
> +static void cmdq_core_handle_done(struct cmdq *cqctx, int tid, int value)
> +{
> + struct device *dev = cqctx->dev;
> + struct cmdq_thread *thread = &cqctx->thread[tid];
> + int cookie = cmdq_thread_get_cookie(cqctx, tid);
> + int count;
> + int i;
> + struct cmdq_task *task;
> +
> + if (thread->wait_cookie <= cookie) {
> + count = cookie - thread->wait_cookie + 1;
> + } else if ((cookie + 1) % CMDQ_MAX_COOKIE_VALUE ==
> + thread->wait_cookie) {
> + count = 0;
> + } else {
> + /* counter wrapped */
> + count = (CMDQ_MAX_COOKIE_VALUE - thread->wait_cookie + 1) +
> + (cookie + 1);
> + dev_err(dev,
> + "IRQ: counter wrapped: wait cookie:%d, hw cookie:%d, count=%d",
> + thread->wait_cookie, cookie, count);
> + }
> +
> + for (i = (thread->wait_cookie % CMDQ_MAX_TASK_IN_THREAD); count > 0;
> + count--, i++) {
> + if (i >= CMDQ_MAX_TASK_IN_THREAD)
> + i = 0;
> +
> + if (thread->cur_task[i]) {
> + task = thread->cur_task[i];
> + task->irq_flag = value;
> + if (task->cb.isr_cb)
> + task->cb.isr_cb(task->cb.isr_data);
> + cmdq_thread_remove_task_by_index(
> + thread, i, TASK_STATE_DONE);
> + }
> + }
> +
> + thread->wait_cookie = cookie + 1;
> + if (thread->wait_cookie > CMDQ_MAX_COOKIE_VALUE)
> + thread->wait_cookie -= (CMDQ_MAX_COOKIE_VALUE + 1);
> + /* min cookie value is 0 */
> +
> + wake_up(&cqctx->wait_queue[tid]);
> +}
> +
> +static void cmdq_core_handle_irq(struct cmdq *cqctx, int tid)
> +{
> + struct device *dev = cqctx->dev;
> + void __iomem *gce_base = cqctx->base;
> + unsigned long flags = 0L;
> + int value;
> + int enabled;
> + int cookie;
> +
> + /*
> + * normal execution: mark tasks done and remove them from the
> + * thread; also handle the "loop CB fail" case
> + */
> + spin_lock_irqsave(&cqctx->exec_lock, flags);
> +
> + /*
> + * it is possible for another CPU core to run "release task"
> + * right before we acquire the spinlock, and thus reset/disable
> + * this HW thread; so we check both the IRQ flag and the enable
> + * bit of this thread
> + */
> + value = readl(gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + if (!(value & CMDQ_THR_IRQ_MASK)) {
> + dev_err(dev,
> + "IRQ: thread %d got interrupt but IRQ flag is 0x%08x\n",
> + tid, value);
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> + return;
> + }
> +
> + enabled = readl(gce_base + CMDQ_THR_ENABLE_TASK_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + if (!(enabled & CMDQ_THR_ENABLED)) {
> + dev_err(dev,
> + "IRQ: thread %d got interrupt already disabled 0x%08x\n",
> + tid, enabled);
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> + return;
> + }
> +
> + /* read HW cookie here for printing message */
> + cookie = cmdq_thread_get_cookie(cqctx, tid);
> +
> + /*
> + * Reset the IRQ status right after reading the HW cookie
> + * to prevent a race condition and save the cost of a suspend
> + */
> + writel(~value,
> + gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + if (value & CMDQ_THR_IRQ_ERROR)
> + cmdq_core_handle_error(cqctx, tid, value);
> + else if (value & CMDQ_THR_IRQ_DONE)
> + cmdq_core_handle_done(cqctx, tid, value);
> +
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> +}
> +
> +static int cmdq_task_exec_async(struct cmdq_task *task, int tid)
> +{
> + struct device *dev = task->cqctx->dev;
> + int status;
> +
> + status = cmdq_task_exec_async_impl(task, tid);
> + if (status >= 0)
> + return status;
> +
> + if ((task->task_state == TASK_STATE_KILLED) ||
> + (task->task_state == TASK_STATE_ERROR)) {
> + dev_err(dev, "cmdq_task_exec_async_impl fail\n");
> + return -EFAULT;
> + }
> +
> + return 0;
> +}
> +
> +static void cmdq_core_consume_waiting_list(struct work_struct *work)
> +{
> + struct list_head *p, *n = NULL;
> + bool thread_acquired;
> + ktime_t consume_time;
> + s64 waiting_time_ns;
> + bool need_log;
> + struct cmdq *cqctx;
> + struct device *dev;
> + u32 err_bits = 0;
> +
> + cqctx = container_of(work, struct cmdq,
> + task_consume_wait_queue_item);
> + dev = cqctx->dev;
> +
> + consume_time = ktime_get();
> +
> + mutex_lock(&cqctx->task_mutex);
> +
> + thread_acquired = false;
> +
> + /* scan and remove (if executed) waiting tasks */
> + list_for_each_safe(p, n, &cqctx->task_wait_list) {
> + struct cmdq_task *task;
> + struct cmdq_thread *thread;
> + int tid;
> + int status;
> +
> + task = list_entry(p, struct cmdq_task, list_entry);
> + tid = cmdq_eng_get_thread(task->engine_flag);
> +
> + waiting_time_ns = ktime_to_ns(
> + ktime_sub(consume_time, task->submit));
> + need_log = waiting_time_ns >= CMDQ_PREALARM_TIMEOUT_NS;
> +
> + /*
> + * Once a task has to wait, skip the following tasks on the
> + * same thread to keep the order of display tasks.
> + */
> + if (err_bits & BIT(tid))
> + continue;
> +
> + /* acquire HW thread */
> + thread = cmdq_core_acquire_thread(cqctx, tid);
> + if (!thread) {
> + /* have to wait, remain in wait list */
> + dev_warn(dev, "acquire thread(%d) fail, need to wait\n",
> + tid);
> + if (need_log) /* task wait too long */
> + dev_warn(dev, "waiting:%lldns, task:0x%p\n",
> + waiting_time_ns, task);
> + err_bits |= BIT(tid);
> + continue;
> + }
> +
> + /* some task is ready to run */
> + thread_acquired = true;
> +
> + /*
> + * start execution
> + * remove from wait list and put into active list
> + */
> + list_move_tail(&task->list_entry,
> + &cqctx->task_active_list);
> +
> + /* run task on thread */
> + status = cmdq_task_exec_async(task, tid);
> + if (status < 0) {
> + dev_err(dev, "%s fail, release task 0x%p\n",
> + __func__, task);
> + cmdq_task_remove_thread(task);
> + cmdq_task_release_unlocked(task);
> + task = NULL;
> + }
> + }
> +
> + if (thread_acquired) {
> + /*
> + * notify the tasks' SW threads to update their waiting state
> + * (if they have already called cmdq_task_wait_and_release())
> + */
> + wake_up_all(&cqctx->thread_dispatch_queue);
> + }
> +
> + mutex_unlock(&cqctx->task_mutex);
> +}
> +
> +static int cmdq_core_submit_task_async(struct cmdq_command *cmd_desc,
> + struct cmdq_task **task_out,
> + struct cmdq_task_cb *cb)
> +{
> + struct cmdq *cqctx = cmd_desc->cqctx;
> +
> + /* create a new task and put it at the tail of the waiting list */
> + *task_out = cmdq_core_acquire_task(cmd_desc, cb);
> +
> + if (!(*task_out))
> + return -EFAULT;
> +
> + /*
> + * Consume the waiting list.
> + * This may or may not execute the task, depending on available threads.
> + */
> + cmdq_core_consume_waiting_list(&cqctx->task_consume_wait_queue_item);
> +
> + return 0;
> +}
> +
> +static int cmdq_core_release_task(struct cmdq_task *task)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + int tid = task->thread;
> + struct cmdq_thread *thread = &cqctx->thread[tid];
> + unsigned long flags;
> + int status;
> +
> + if (tid != CMDQ_INVALID_THREAD && thread) {
> + /* this task is being executed (or queued) on a HW thread */
> +
> + /* get the SW lock first to ensure atomic access to the HW */
> + spin_lock_irqsave(&cqctx->exec_lock, flags);
> + smp_mb(); /* make sure HW access is atomic */
> +
> + status = cmdq_thread_force_remove_task(task, tid);
> + if (thread->task_count > 0)
> + cmdq_thread_resume(cqctx, tid);
> +
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> + wake_up(&cqctx->wait_queue[tid]);
> + }
> +
> + cmdq_task_remove_thread(task);
> + cmdq_task_release_internal(task);
> + return 0;
> +}
> +
> +struct cmdq_task_error_report {
> + bool throw_err;
> + const char *module;
> + u32 inst_a;
> + u32 inst_b;
> + u32 irq_flag;
> +};
> +
> +static int cmdq_task_handle_error_result(
> + struct cmdq_task *task, int tid, int wait_q,
> + struct cmdq_task_error_report *error_report)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct device *dev = cqctx->dev;
> + void __iomem *gce_base = cqctx->base;
> + struct cmdq_thread *thread = &cqctx->thread[tid];
> + int status = 0;
> + int i;
> + bool is_err = false;
> + struct cmdq_task *next_task;
> + struct cmdq_task *prev_task;
> + int cookie;
> + unsigned long thread_pc;
> +
> + dev_err(dev,
> + "task(0x%p) state is not TASK_STATE_DONE, but %d.\n",
> + task, task->task_state);
> +
> + /*
> + * Oops, that task is not done.
> + * We have several possible error cases:
> + * 1. task still running (hang / timeout)
> + * 2. IRQ pending (done or error/timeout IRQ)
> + * 3. task's SW thread has been signaled (e.g. SIGKILL)
> + */
> +
> + /*
> + * suspend HW thread first,
> + * so that we work in a consistent state
> + */
> + status = cmdq_thread_suspend(cqctx, tid);
> + if (status < 0)
> + error_report->throw_err = true;
> +
> + /* The cookie of the task currently being processed */
> + cookie = cmdq_thread_get_cookie(cqctx, tid) + 1;
> + thread_pc = (unsigned long)readl(gce_base +
> + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + /* process any pending IRQ */
> + error_report->irq_flag = readl(
> + gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + if (error_report->irq_flag & CMDQ_THR_IRQ_ERROR)
> + cmdq_core_handle_error(cqctx, tid, error_report->irq_flag);
> + else if (error_report->irq_flag & CMDQ_THR_IRQ_DONE)
> + cmdq_core_handle_done(cqctx, tid, error_report->irq_flag);
> +
> + writel(~error_report->irq_flag,
> + gce_base + CMDQ_THR_IRQ_STATUS_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> +
> + /* check if this task has finished after handling pending IRQ */
> + if (task->task_state == TASK_STATE_DONE)
> + return 0;
> +
> + /* Then decide whether this is a SW timeout or we were signaled (not an error) */
> + if (!wait_q) {
> + /* SW timeout and no IRQ received */
> + is_err = true;
> + dev_err(dev, "SW timeout of task 0x%p on tid %d\n",
> + task, tid);
> + error_report->throw_err = true;
> + cmdq_core_parse_error(task, tid,
> + &error_report->module,
> + &error_report->irq_flag,
> + &error_report->inst_a,
> + &error_report->inst_b);
> + status = -ETIMEDOUT;
> + } else if (wait_q < 0) {
> + /*
> + * Task is killed.
> + * Not an error, but still need to remove.
> + */
> + is_err = false;
> +
> + if (wait_q == -ERESTARTSYS)
> + dev_err(dev,
> + "Task 0x%p KILLED by wait_q = -ERESTARTSYS\n",
> + task);
> + else if (wait_q == -EINTR)
> + dev_err(dev,
> + "Task 0x%p KILLED by wait_q = -EINTR\n",
> + task);
> + else
> + dev_err(dev,
> + "Task 0x%p KILLED by wait_q = %d\n",
> + task, wait_q);
> +
> + status = wait_q;
> + }
> +
> + if (task->task_state == TASK_STATE_BUSY) {
> + /*
> + * If task_state is BUSY, this means we did not reach EOC
> + * and did not get an error IRQ, so:
> + * - remove the task from thread->cur_task[]
> + * - decrease thread->task_count
> + * NOTE: after this, cur_task[] no longer contains a link
> + * to the task, and the task becomes TASK_STATE_ERROR
> + * (or TASK_STATE_KILLED).
> + */
> +
> + /* find our place in thread->cur_task[] */
> + for (i = 0; i < CMDQ_MAX_TASK_IN_THREAD; i++) {
> + if (thread->cur_task[i] == task) {
> + /* update task_count and cur_task[] */
> + cmdq_thread_remove_task_by_index(
> + thread, i, is_err ?
> + TASK_STATE_ERROR :
> + TASK_STATE_KILLED);
> + break;
> + }
> + }
> + }
> +
> + next_task = NULL;
> +
> + /* find the task's jump destination, if any (else there is no next task) */
> + if (task->va_base[task->num_cmd - 1] == CMDQ_JUMP_BY_PA)
> + next_task = cmdq_thread_search_task_by_pc(
> + thread,
> + task->va_base[task->num_cmd - 2]);
> +
> + /*
> + * Then, we try to remove the task from the thread->cur_task chain:
> + * - if the HW PC falls within the task's range:
> + *   - HW EXEC_CNT += 1
> + *   - thread->wait_cookie += 1
> + *   - set the HW PC to the next task's head
> + * - if not, find the previous task
> + *   (whose jump address is task->mva_base):
> + *   - check that the HW PC does not point at the EOC/JUMP end
> + *   - change the jump to a fake EOC (no IRQ)
> + *   - insert a jump to the next task's head and increase the
> + *     command buffer size
> + *   - if there is no next task, set the HW end address
> + */
> + if (task->num_cmd && thread_pc >= task->mva_base &&
> + thread_pc <= (task->mva_base + task->command_size)) {
> + if (next_task) {
> + /* cookie already +1 */
> + writel(cookie,
> + gce_base + CMDQ_THR_EXEC_CNT_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + thread->wait_cookie = cookie + 1;
> + writel(next_task->mva_base,
> + gce_base + CMDQ_THR_CURR_ADDR_OFFSET +
> + CMDQ_THR_SHIFT * tid);
> + }
> + } else {
> + prev_task = NULL;
> + for (i = 0; i < CMDQ_MAX_TASK_IN_THREAD; i++) {
> + u32 *prev_va, *curr_va;
> + u32 prev_num, curr_num;
> +
> + prev_task = thread->cur_task[i];
> + if (!prev_task)
> + continue;
> +
> + prev_va = prev_task->va_base;
> + prev_num = prev_task->num_cmd;
> + if (!prev_num)
> + continue;
> +
> + curr_va = task->va_base;
> + curr_num = task->num_cmd;
> +
> + /* find which task JUMPs into this task */
> + if (prev_va[prev_num - 2] == task->mva_base &&
> + prev_va[prev_num - 1] == CMDQ_JUMP_BY_PA) {
> + /* Copy Jump instruction */
> + prev_va[prev_num - 2] =
> + curr_va[curr_num - 2];
> + prev_va[prev_num - 1] =
> + curr_va[curr_num - 1];
> +
> + if (next_task)
> + cmdq_thread_reorder_task_array(
> + thread, i);
> +
> + /*
> + * Give up fetched command,
> + * invoke CMDQ HW to re-fetch command.
> + */
> + cmdq_core_invalidate_hw_fetched_buffer(
> + cqctx, tid);
> +
> + break;
> + }
> + }
> + }
> +
> + return status;
> +}
> +
> +static int cmdq_task_wait_result(struct cmdq_task *task, int tid, int wait_q)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct cmdq_thread *thread = &cqctx->thread[tid];
> + int status = 0;
> + unsigned long flags;
> + struct cmdq_task_error_report error_report = { 0 };
> +
> + /*
> + * Note that although we disable IRQs here, the HW continues to
> + * execute, so it's possible to have a pending IRQ.
> + */
> + spin_lock_irqsave(&cqctx->exec_lock, flags);
> +
> + if (task->task_state != TASK_STATE_DONE)
> + status = cmdq_task_handle_error_result(
> + task, tid, wait_q, &error_report);
> +
> + if (thread->task_count <= 0)
> + cmdq_thread_disable(cqctx, tid);
> + else
> + cmdq_thread_resume(cqctx, tid);
> +
> + spin_unlock_irqrestore(&cqctx->exec_lock, flags);
> +
> + if (error_report.throw_err) {
> + u32 op = error_report.inst_a >> CMDQ_OP_CODE_SHIFT;
> +
> + switch (op) {
> + case CMDQ_CODE_WFE:
> + dev_err(cqctx->dev,
> + "%s in CMDQ IRQ:0x%02x, INST:(0x%08x, 0x%08x), OP:WAIT EVENT:%s\n",
> + error_report.module, error_report.irq_flag,
> + error_report.inst_a, error_report.inst_b,
> + cmdq_event_get_name(error_report.inst_a &
> + CMDQ_ARG_A_MASK));
> + break;
> + default:
> + dev_err(cqctx->dev,
> + "%s in CMDQ IRQ:0x%02x, INST:(0x%08x, 0x%08x), OP:%s\n",
> + error_report.module, error_report.irq_flag,
> + error_report.inst_a, error_report.inst_b,
> + cmdq_core_parse_op(op));
> + break;
> + }
> + }
> +
> + return status;
> +}
> +
> +static int cmdq_task_wait_done(struct cmdq_task *task)
> +{
> + struct cmdq *cqctx = task->cqctx;
> + struct device *dev = cqctx->dev;
> + int wait_q;
> + int tid;
> + unsigned long timeout = msecs_to_jiffies(
> + CMDQ_ACQUIRE_THREAD_TIMEOUT_MS);
> +
> + /*
> + * wait to acquire a thread
> + * (this is done by cmdq_core_consume_waiting_list)
> + */
> + wait_q = wait_event_timeout(
> + cqctx->thread_dispatch_queue,
> + (task->thread != CMDQ_INVALID_THREAD), timeout);
> +
> + if (!wait_q) {
> + mutex_lock(&cqctx->task_mutex);
> +
> + /*
> + * it's possible that the task was consumed just now,
> + * so check again
> + */
> + if (task->thread == CMDQ_INVALID_THREAD) {
> + /*
> + * The task may have been released,
> + * or starved to death.
> + */
> + dev_err(dev,
> + "task(0x%p) timeout with invalid thread\n",
> + task);
> +
> + /*
> + * remove from waiting list,
> + * so that it won't be consumed in the future
> + */
> + list_del_init(&task->list_entry);
> +
> + mutex_unlock(&cqctx->task_mutex);
> + return -EINVAL;
> + }
> +
> + /* valid thread, so we keep going */
> + mutex_unlock(&cqctx->task_mutex);
> + }
> +
> + tid = task->thread;
> + if (tid < 0 || tid >= CMDQ_MAX_THREAD_COUNT) {
> + dev_err(dev, "invalid thread %d in %s\n", tid, __func__);
> + return -EINVAL;
> + }
> +
> + /* start to wait */
> + wait_q = wait_event_timeout(task->cqctx->wait_queue[tid],
> + (task->task_state != TASK_STATE_BUSY &&
> + task->task_state != TASK_STATE_WAITING),
> + msecs_to_jiffies(CMDQ_DEFAULT_TIMEOUT_MS));
> + if (!wait_q)
> + dev_dbg(dev, "timeout!\n");
> +
> + /* wake up and continue */
> + return cmdq_task_wait_result(task, tid, wait_q);
> +}
> +
> +static int cmdq_task_wait_and_release(struct cmdq_task *task)
> +{
> + struct cmdq *cqctx;
> + int status;
> +
> + if (!task) {
> + pr_err("%s err ptr=0x%p\n", __func__, task);
> + return -EFAULT;
> + }
> +
> + if (task->task_state == TASK_STATE_IDLE) {
> + pr_err("%s task=0x%p is IDLE\n", __func__, task);
> + return -EFAULT;
> + }
> +
> + cqctx = task->cqctx;
> +
> + /* wait for task finish */
> + status = cmdq_task_wait_done(task);
> +
> + /* release */
> + cmdq_task_remove_thread(task);
> + cmdq_task_release_internal(task);
> +
> + return status;
> +}
> +
> +static void cmdq_core_auto_release_work(struct work_struct *work_item)
> +{
> + struct cmdq_task *task;
> + int status;
> + struct cmdq_task_cb cb;
> +
> + task = container_of(work_item, struct cmdq_task, auto_release_work);
> + cb = task->cb;
> + status = cmdq_task_wait_and_release(task);
> +
> + /* the ISR path failed, so call isr_cb here to prevent a lock-up */
> + if (status && cb.isr_cb)
> + cb.isr_cb(cb.isr_data);
> +
> + if (cb.done_cb)
> + cb.done_cb(cb.done_data);
> +}
> +
> +static int cmdq_core_auto_release_task(struct cmdq_task *task)
> +{
> + struct cmdq *cqctx = task->cqctx;
> +
> + /*
> + * the work item is already embedded in the task,
> + * but we still need to initialize it
> + */
> + INIT_WORK(&task->auto_release_work, cmdq_core_auto_release_work);
> + queue_work(cqctx->task_auto_release_wq, &task->auto_release_work);
> + return 0;
> +}
> +
> +static int cmdq_core_submit_task(struct cmdq_command *cmd_desc)
> +{
> + struct device *dev = cmd_desc->cqctx->dev;
> + int status;
> + struct cmdq_task *task;
> +
> + status = cmdq_core_submit_task_async(cmd_desc, &task, NULL);
> + if (status < 0) {
> + dev_err(dev, "cmdq_core_submit_task_async failed=%d\n", status);
> + return status;
> + }
> +
> + status = cmdq_task_wait_and_release(task);
> + if (status < 0)
> + dev_err(dev, "task(0x%p) wait fail\n", task);
> +
> + return status;
> +}
> +
> +static void cmdq_core_deinitialize(struct platform_device *pdev)
> +{
> + struct cmdq *cqctx = platform_get_drvdata(pdev);
> + int i;
> + struct list_head *lists[] = {
> + &cqctx->task_free_list,
> + &cqctx->task_active_list,
> + &cqctx->task_wait_list
> + };
> +
> + /*
> + * Directly destroy the auto release WQ
> + * since we're going to release tasks anyway.
> + */
> + destroy_workqueue(cqctx->task_auto_release_wq);
> + cqctx->task_auto_release_wq = NULL;
> +
> + destroy_workqueue(cqctx->task_consume_wq);
> + cqctx->task_consume_wq = NULL;
> +
> + /* release all tasks in all three lists */
> + for (i = 0; i < ARRAY_SIZE(lists); i++) {
> + struct cmdq_task *task, *tmp;
> +
> + list_for_each_entry_safe(task, tmp, lists[i], list_entry) {
> + /* unlink before freeing to avoid a use-after-free */
> + list_del(&task->list_entry);
> + cmdq_task_free_command_buffer(task);
> + kmem_cache_free(cqctx->task_cache, task);
> + }
> + }
> +
> + kmem_cache_destroy(cqctx->task_cache);
> + cqctx->task_cache = NULL;
> +
> + /* release command buffer pool */
> + cmdq_cmd_buf_pool_uninit(cqctx);
> +}
> +
> +static irqreturn_t cmdq_irq_handler(int irq, void *dev)
> +{
> + struct cmdq *cqctx = dev;
> + int i;
> + u32 irq_status;
> + bool handled = false;
> +
> + irq_status = readl(cqctx->base + CMDQ_CURR_IRQ_STATUS_OFFSET);
> + irq_status &= CMDQ_IRQ_MASK;
> + for (i = 0;
> + irq_status != CMDQ_IRQ_MASK && i < CMDQ_MAX_THREAD_COUNT;
> + i++) {
> + /* STATUS bit set to 0 means IRQ asserted */
> + if (irq_status & BIT(i))
> + continue;
> +
> + /*
> + * We set the bit in irq_status back to 1 to denote
> + * that it has been processed, so we can exit early
> + * once no more threads are asserted.
> + */
> + irq_status |= BIT(i);
> +
> + cmdq_core_handle_irq(cqctx, i);
> + handled = true;
> + }
> +
> + if (!handled)
> + return IRQ_NONE;
> +
> + queue_work(cqctx->task_consume_wq,
> + &cqctx->task_consume_wait_queue_item);
> + return IRQ_HANDLED;
> +}
> +
> +static int cmdq_core_initialize(struct platform_device *pdev,
> + struct cmdq **cqctx)
> +{
> + struct cmdq *lcqctx; /* local cmdq context */
> + int i;
> + int ret = 0;
> +
> + lcqctx = devm_kzalloc(&pdev->dev, sizeof(*lcqctx), GFP_KERNEL);
> + if (!lcqctx)
> + return -ENOMEM;
> +
> + /* save dev */
> + lcqctx->dev = &pdev->dev;
> +
> + /* initialize cmdq device-related data */
> + ret = cmdq_dev_init(pdev, lcqctx);
> + if (ret) {
> + dev_err(&pdev->dev, "failed to init cmdq device\n");
> + goto fail_dev;
> + }
> +
> + /* initialize mutexes and spinlocks */
> + mutex_init(&lcqctx->task_mutex);
> + mutex_init(&lcqctx->clock_mutex);
> + spin_lock_init(&lcqctx->thread_lock);
> + spin_lock_init(&lcqctx->exec_lock);
> +
> + /* initialize wait queues for notification */
> + for (i = 0; i < ARRAY_SIZE(lcqctx->wait_queue); i++)
> + init_waitqueue_head(&lcqctx->wait_queue[i]);
> + init_waitqueue_head(&lcqctx->thread_dispatch_queue);
> +
> + /* create task pool */
> + lcqctx->task_cache = kmem_cache_create(
> + CMDQ_DRIVER_DEVICE_NAME "_task",
> + sizeof(struct cmdq_task),
> + __alignof__(struct cmdq_task),
> + SLAB_POISON | SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE,
> + &cmdq_task_ctor);
> +
> + /* initialize task lists */
> + INIT_LIST_HEAD(&lcqctx->task_free_list);
> + INIT_LIST_HEAD(&lcqctx->task_active_list);
> + INIT_LIST_HEAD(&lcqctx->task_wait_list);
> + INIT_WORK(&lcqctx->task_consume_wait_queue_item,
> + cmdq_core_consume_waiting_list);
> +
> + /* initialize command buffer pool */
> + ret = cmdq_cmd_buf_pool_init(lcqctx);
> + if (ret) {
> + dev_err(&pdev->dev, "failed to init command buffer pool\n");
> + goto fail_cmd_buf_pool;
> + }
> +
> + lcqctx->task_auto_release_wq = alloc_ordered_workqueue(
> + "%s", WQ_MEM_RECLAIM | WQ_HIGHPRI, "cmdq_auto_release");
> + lcqctx->task_consume_wq = alloc_ordered_workqueue(
> + "%s", WQ_MEM_RECLAIM | WQ_HIGHPRI, "cmdq_task");
> +
> + *cqctx = lcqctx;
> + return ret;
> +
> +fail_cmd_buf_pool:
> + destroy_workqueue(lcqctx->task_auto_release_wq);
> + destroy_workqueue(lcqctx->task_consume_wq);
> + kmem_cache_destroy(lcqctx->task_cache);
> +
> +fail_dev:
> + return ret;
> +}
> +
> +static int cmdq_rec_realloc_cmd_buffer(struct cmdq_rec *handle, size_t size)
> +{
> + void *new_buf;
> +
> + new_buf = krealloc(handle->buf_ptr, size, GFP_KERNEL | __GFP_ZERO);
> + if (!new_buf)
> + return -ENOMEM;
> + handle->buf_ptr = new_buf;
> + handle->buf_size = size;
> + return 0;
> +}
> +
> +static int cmdq_rec_stop_running_task(struct cmdq_rec *handle)
> +{
> + int status;
> +
> + status = cmdq_core_release_task(handle->running_task_ptr);
> + handle->running_task_ptr = NULL;
> + return status;
> +}
> +
> +int cmdq_rec_create(struct device *dev, u64 engine_flag,
> + struct cmdq_rec **handle_ptr)
> +{
> + struct cmdq *cqctx;
> + struct cmdq_rec *handle;
> + int ret;
> +
> + cqctx = dev_get_drvdata(dev);
> + if (!cqctx) {
> + dev_err(dev, "cmdq context is NULL\n");
> + return -EINVAL;
> + }
> +
> + handle = kzalloc(sizeof(*handle), GFP_KERNEL);
> + if (!handle)
> + return -ENOMEM;
> +
> + handle->cqctx = cqctx;
> + handle->engine_flag = engine_flag;
> +
> + ret = cmdq_rec_realloc_cmd_buffer(handle, CMDQ_INITIAL_CMD_BLOCK_SIZE);
> + if (ret) {
> + kfree(handle);
> + return ret;
> + }
> +
> + *handle_ptr = handle;
> +
> + return 0;
> +}
> +EXPORT_SYMBOL(cmdq_rec_create);
> +
> +static int cmdq_rec_append_command(struct cmdq_rec *handle,
> + enum cmdq_code code,
> + u32 arg_a, u32 arg_b)
> +{
> + struct cmdq *cqctx;
> + struct device *dev;
> + int subsys;
> + u32 *cmd_ptr;
> + int ret;
> +
> + cqctx = handle->cqctx;
> + dev = cqctx->dev;
> +
> + if (handle->finalized) {
> + dev_err(dev,
> + "already finalized record(cannot add more command)");
> + dev_err(dev, "handle=0x%p, tid=%d\n", handle, current->pid);
> + return -EBUSY;
> + }
> +
> + /* check if we have sufficient buffer size */
> + if (unlikely(handle->block_size + CMDQ_INST_SIZE > handle->buf_size)) {
> + ret = cmdq_rec_realloc_cmd_buffer(handle, handle->buf_size * 2);
> + if (ret)
> + return ret;
> + }
> +
> + /*
> + * calculate the write position here, after the buffer
> + * may have been reallocated (and thus moved) above
> + */
> + cmd_ptr = (u32 *)((u8 *)handle->buf_ptr + handle->block_size);
> +
> + switch (code) {
> + case CMDQ_CODE_MOVE:
> + cmd_ptr[0] = arg_b;
> + cmd_ptr[1] = (CMDQ_CODE_MOVE << CMDQ_OP_CODE_SHIFT) |
> + (arg_a & CMDQ_ARG_A_MASK);
> + break;
> + case CMDQ_CODE_WRITE:
> + subsys = cmdq_subsys_from_phys_addr(cqctx, arg_a);
> + if (subsys < 0) {
> + dev_err(dev,
> + "unsupported memory base address 0x%08x\n",
> + arg_a);
> + return -EFAULT;
> + }
> +
> + cmd_ptr[0] = arg_b;
> + cmd_ptr[1] = (CMDQ_CODE_WRITE << CMDQ_OP_CODE_SHIFT) |
> + (arg_a & CMDQ_ARG_A_WRITE_MASK) |
> + ((subsys & CMDQ_SUBSYS_MASK) << CMDQ_SUBSYS_SHIFT);
> + break;
> + case CMDQ_CODE_JUMP:
> + cmd_ptr[0] = arg_b;
> + cmd_ptr[1] = (CMDQ_CODE_JUMP << CMDQ_OP_CODE_SHIFT) |
> + (arg_a & CMDQ_ARG_A_MASK);
> + break;
> + case CMDQ_CODE_WFE:
> + /*
> + * bit 0-11: wait_value, 1
> + * bit 15: to_wait, true
> + * bit 16-27: update_value, 0
> + * bit 31: to_update, true
> + */
> + cmd_ptr[0] = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT |
> + CMDQ_WFE_WAIT_VALUE;
> + cmd_ptr[1] = (CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) | arg_a;
> + break;
> + case CMDQ_CODE_CLEAR_EVENT:
> + /*
> + * bit 0-11: wait_value, 0
> + * bit 15: to_wait, false
> + * bit 16-27: update_value, 0
> + * bit 31: to_update, true
> + */
> + cmd_ptr[0] = CMDQ_WFE_UPDATE;
> + cmd_ptr[1] = (CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) | arg_a;
> + break;
> + case CMDQ_CODE_EOC:
> + cmd_ptr[0] = arg_b;
> + cmd_ptr[1] = (CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT) |
> + (arg_a & CMDQ_ARG_A_MASK);
> + break;
> + default:
> + return -EFAULT;
> + }
> +
> + handle->block_size += CMDQ_INST_SIZE;
> +
> + return 0;
> +}
> +
> +int cmdq_rec_reset(struct cmdq_rec *handle)
> +{
> + if (handle->running_task_ptr)
> + cmdq_rec_stop_running_task(handle);
> +
> + handle->block_size = 0;
> + handle->finalized = false;
> +
> + return 0;
> +}
> +EXPORT_SYMBOL(cmdq_rec_reset);
> +
> +int cmdq_rec_write(struct cmdq_rec *handle, u32 value, u32 addr)
> +{
> + return cmdq_rec_append_command(handle, CMDQ_CODE_WRITE, addr, value);
> +}
> +EXPORT_SYMBOL(cmdq_rec_write);
> +
> +int cmdq_rec_write_mask(struct cmdq_rec *handle, u32 value,
> + u32 addr, u32 mask)
> +{
> + int ret;
> +
> + if (mask != 0xffffffff) {
> + ret = cmdq_rec_append_command(handle, CMDQ_CODE_MOVE, 0, ~mask);
> + if (ret)
> + return ret;
> +
> + addr = addr | CMDQ_ENABLE_MASK;
> + }
> +
> + return cmdq_rec_append_command(handle, CMDQ_CODE_WRITE, addr, value);
> +}
> +EXPORT_SYMBOL(cmdq_rec_write_mask);
> +
> +int cmdq_rec_wait(struct cmdq_rec *handle, enum cmdq_event event)
> +{
> + if (event == CMDQ_SYNC_TOKEN_INVALID || event >= CMDQ_SYNC_TOKEN_MAX ||
> + event < 0)
> + return -EINVAL;
> +
> + return cmdq_rec_append_command(handle, CMDQ_CODE_WFE, event, 0);
> +}
> +EXPORT_SYMBOL(cmdq_rec_wait);
> +
> +int cmdq_rec_clear_event(struct cmdq_rec *handle, enum cmdq_event event)
> +{
> + if (event == CMDQ_SYNC_TOKEN_INVALID || event >= CMDQ_SYNC_TOKEN_MAX ||
> + event < 0)
> + return -EINVAL;
> +
> + return cmdq_rec_append_command(handle, CMDQ_CODE_CLEAR_EVENT, event, 0);
> +}
> +EXPORT_SYMBOL(cmdq_rec_clear_event);
> +
> +static int cmdq_rec_finalize_command(struct cmdq_rec *handle)
> +{
> + int status;
> + struct device *dev;
> + u32 arg_b;
> +
> + dev = handle->cqctx->dev;
> +
> + if (!handle->finalized) {
> + /* insert EOC and generate IRQ for each command iteration */
> + arg_b = CMDQ_EOC_IRQ_EN;
> + status = cmdq_rec_append_command(handle, CMDQ_CODE_EOC,
> + 0, arg_b);
> + if (status)
> + return status;
> +
> + /* JUMP to begin */
> + status = cmdq_rec_append_command(handle, CMDQ_CODE_JUMP, 0, 8);
> + if (status)
> + return status;
> +
> + handle->finalized = true;
> + }
> +
> + return 0;
> +}
> +
> +static int cmdq_rec_fill_cmd_desc(struct cmdq_rec *handle,
> + struct cmdq_command *desc)
> +{
> + int ret;
> +
> + ret = cmdq_rec_finalize_command(handle);
> + if (ret)
> + return ret;
> +
> + desc->cqctx = handle->cqctx;
> + desc->engine_flag = handle->engine_flag;
> + desc->va_base = handle->buf_ptr;
> + desc->block_size = handle->block_size;
> +
> + return ret;
> +}
> +
> +int cmdq_rec_flush(struct cmdq_rec *handle)
> +{
> + int ret;
> + struct cmdq_command desc;
> +
> + ret = cmdq_rec_fill_cmd_desc(handle, &desc);
> + if (ret)
> + return ret;
> +
> + return cmdq_core_submit_task(&desc);
> +}
> +EXPORT_SYMBOL(cmdq_rec_flush);
> +
> +static int cmdq_rec_flush_async_cb(struct cmdq_rec *handle,
> + cmdq_async_flush_cb isr_cb,
> + void *isr_data,
> + cmdq_async_flush_cb done_cb,
> + void *done_data)
> +{
> + int ret;
> + struct cmdq_command desc;
> + struct cmdq_task *task;
> + struct cmdq_task_cb cb;
> +
> + ret = cmdq_rec_fill_cmd_desc(handle, &desc);
> + if (ret)
> + return ret;
> +
> + cb.isr_cb = isr_cb;
> + cb.isr_data = isr_data;
> + cb.done_cb = done_cb;
> + cb.done_data = done_data;
> +
> + ret = cmdq_core_submit_task_async(&desc, &task, &cb);
> + if (ret)
> + return ret;
> +
> + ret = cmdq_core_auto_release_task(task);
> +
> + return ret;
> +}
> +
> +int cmdq_rec_flush_async(struct cmdq_rec *handle)
> +{
> + return cmdq_rec_flush_async_cb(handle, NULL, NULL, NULL, NULL);
> +}
> +EXPORT_SYMBOL(cmdq_rec_flush_async);
> +
> +int cmdq_rec_flush_async_callback(struct cmdq_rec *handle,
> + cmdq_async_flush_cb isr_cb,
> + void *isr_data,
> + cmdq_async_flush_cb done_cb,
> + void *done_data)
> +{
> + return cmdq_rec_flush_async_cb(handle, isr_cb, isr_data,
> + done_cb, done_data);
> +}
> +EXPORT_SYMBOL(cmdq_rec_flush_async_callback);
> +
> +void cmdq_rec_destroy(struct cmdq_rec *handle)
> +{
> + if (handle->running_task_ptr)
> + cmdq_rec_stop_running_task(handle);
> +
> + /* free command buffer */
> + kfree(handle->buf_ptr);
> + handle->buf_ptr = NULL;
> +
> + /* free command handle */
> + kfree(handle);
> +}
> +EXPORT_SYMBOL(cmdq_rec_destroy);
> +
> +static int cmdq_probe(struct platform_device *pdev)
> +{
> + struct cmdq *cqctx;
> + int ret;
> +
> + /* init cmdq context, and save it */
> + ret = cmdq_core_initialize(pdev, &cqctx);
> + if (ret) {
> + dev_err(&pdev->dev, "failed to init cmdq context\n");
> + return ret;
> + }
> + platform_set_drvdata(pdev, cqctx);
> +
> + ret = devm_request_irq(&pdev->dev, cqctx->irq, cmdq_irq_handler,
> + IRQF_TRIGGER_LOW | IRQF_SHARED,
> + CMDQ_DRIVER_DEVICE_NAME, cqctx);
> + if (ret) {
> + dev_err(&pdev->dev, "failed to register ISR (%d)\n", ret);
> + goto fail;
> + }
> +
> + cqctx->clock = devm_clk_get(&pdev->dev, CMDQ_CLK_NAME);
> + if (IS_ERR(cqctx->clock)) {
> + dev_err(&pdev->dev, "failed to get clk:%s\n", CMDQ_CLK_NAME);
> + ret = PTR_ERR(cqctx->clock);
> + goto fail;
> + }
> +
> + return ret;
> +
> +fail:
> + cmdq_core_deinitialize(pdev);
> + return ret;
> +}
> +
> +static int cmdq_remove(struct platform_device *pdev)
> +{
> + cmdq_core_deinitialize(pdev);
> + return 0;
> +}
> +
> +static const struct of_device_id cmdq_of_ids[] = {
> + {.compatible = "mediatek,mt8173-gce",},
> + {}
> +};
> +
> +static struct platform_driver cmdq_drv = {
> + .probe = cmdq_probe,
> + .remove = cmdq_remove,
> + .driver = {
> + .name = CMDQ_DRIVER_DEVICE_NAME,
> + .owner = THIS_MODULE,
> + .of_match_table = cmdq_of_ids,
> + }
> +};
> +
> +builtin_platform_driver(cmdq_drv);
> diff --git a/include/soc/mediatek/cmdq.h b/include/soc/mediatek/cmdq.h
> new file mode 100644
> index 0000000..29931c9
> --- /dev/null
> +++ b/include/soc/mediatek/cmdq.h
> @@ -0,0 +1,211 @@
> +/*
> + * Copyright (c) 2015 MediaTek Inc.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + */
> +
> +#ifndef __MTK_CMDQ_H__
> +#define __MTK_CMDQ_H__
> +
> +#include <linux/platform_device.h>
> +#include <linux/types.h>
> +
> +enum cmdq_eng {
> + CMDQ_ENG_DISP_AAL,
> + CMDQ_ENG_DISP_COLOR0,
> + CMDQ_ENG_DISP_COLOR1,
> + CMDQ_ENG_DISP_DPI0,
> + CMDQ_ENG_DISP_DSI0,
> + CMDQ_ENG_DISP_DSI1,
> + CMDQ_ENG_DISP_GAMMA,
> + CMDQ_ENG_DISP_OD,
> + CMDQ_ENG_DISP_OVL0,
> + CMDQ_ENG_DISP_OVL1,
> + CMDQ_ENG_DISP_PWM0,
> + CMDQ_ENG_DISP_PWM1,
> + CMDQ_ENG_DISP_RDMA0,
> + CMDQ_ENG_DISP_RDMA1,
> + CMDQ_ENG_DISP_RDMA2,
> + CMDQ_ENG_DISP_UFOE,
> + CMDQ_ENG_DISP_WDMA0,
> + CMDQ_ENG_DISP_WDMA1,
> + CMDQ_ENG_MAX,
> +};
> +
> +/* events for CMDQ and display */
> +enum cmdq_event {
> + /* Display start of frame(SOF) events */
> + CMDQ_EVENT_DISP_OVL0_SOF = 11,
> + CMDQ_EVENT_DISP_OVL1_SOF = 12,
> + CMDQ_EVENT_DISP_RDMA0_SOF = 13,
> + CMDQ_EVENT_DISP_RDMA1_SOF = 14,
> + CMDQ_EVENT_DISP_RDMA2_SOF = 15,
> + CMDQ_EVENT_DISP_WDMA0_SOF = 16,
> + CMDQ_EVENT_DISP_WDMA1_SOF = 17,
> + /* Display end of frame(EOF) events */
> + CMDQ_EVENT_DISP_OVL0_EOF = 39,
> + CMDQ_EVENT_DISP_OVL1_EOF = 40,
> + CMDQ_EVENT_DISP_RDMA0_EOF = 41,
> + CMDQ_EVENT_DISP_RDMA1_EOF = 42,
> + CMDQ_EVENT_DISP_RDMA2_EOF = 43,
> + CMDQ_EVENT_DISP_WDMA0_EOF = 44,
> + CMDQ_EVENT_DISP_WDMA1_EOF = 45,
> + /* Mutex end of frame(EOF) events */
> + CMDQ_EVENT_MUTEX0_STREAM_EOF = 53,
> + CMDQ_EVENT_MUTEX1_STREAM_EOF = 54,
> + CMDQ_EVENT_MUTEX2_STREAM_EOF = 55,
> + CMDQ_EVENT_MUTEX3_STREAM_EOF = 56,
> + CMDQ_EVENT_MUTEX4_STREAM_EOF = 57,
> + /* Display underrun events */
> + CMDQ_EVENT_DISP_RDMA0_UNDERRUN = 63,
> + CMDQ_EVENT_DISP_RDMA1_UNDERRUN = 64,
> + CMDQ_EVENT_DISP_RDMA2_UNDERRUN = 65,
> + /* Keep this at the end of HW events */
> + CMDQ_MAX_HW_EVENT_COUNT = 260,
> + /* This is max event and also can be used as mask. */
> + CMDQ_SYNC_TOKEN_MAX = 0x1ff,
> + /* Invalid event */
> + CMDQ_SYNC_TOKEN_INVALID = -1,
> +};
> +
> +/* called after isr done or task done */
> +typedef int (*cmdq_async_flush_cb)(void *data);
> +
> +struct cmdq_task;
> +struct cmdq;
> +
> +struct cmdq_rec {
> + struct cmdq *cqctx;
> + u64 engine_flag;
> + size_t block_size; /* command size */
> + void *buf_ptr;
> + size_t buf_size;
> + /* running task after flush */
> + struct cmdq_task *running_task_ptr;
> + bool finalized;
> +};
> +
> +/**
> + * cmdq_rec_create() - create command queue record handle
> + * @dev: device
> + * @engine_flag: command queue engine flag
> + * @handle_ptr: command queue record handle pointer to retrieve cmdq_rec
> + *
> + * Return: 0 for success; else the error code is returned
> + */
> +int cmdq_rec_create(struct device *dev, u64 engine_flag,
> + struct cmdq_rec **handle_ptr);
> +
> +/**
> + * cmdq_rec_reset() - reset command queue record commands
> + * @handle: the command queue record handle
> + *
> + * Return: 0 for success; else the error code is returned
> + */
> +int cmdq_rec_reset(struct cmdq_rec *handle);
> +
> +/**
> + * cmdq_rec_write() - append write command to the command queue record
> + * @handle: the command queue record handle
> + * @value: the specified target register value
> + * @addr: the specified target register physical address
> + *
> + * Return: 0 for success; else the error code is returned
> + */
> +int cmdq_rec_write(struct cmdq_rec *handle, u32 value, u32 addr);
> +
> +/**
> + * cmdq_rec_write_mask() - append write command with mask to the command queue
> + * record
> + * @handle: the command queue record handle
> + * @value: the specified target register value
> + * @addr: the specified target register physical address
> + * @mask: the specified target register mask
> + *
> + * Return: 0 for success; else the error code is returned
> + */
> +int cmdq_rec_write_mask(struct cmdq_rec *handle, u32 value,
> + u32 addr, u32 mask);
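For reference: when mask is not 0xffffffff, the record emits a MOVE of ~mask
followed by the WRITE with CMDQ_ENABLE_MASK set (see cmdq_rec_append_command()
above), so only the masked bits are updated. A minimal usage sketch, assuming
an already-created record handle "rec" and a purely hypothetical register
address:

	/* set bit 0 of the register at the hypothetical address
	 * 0x1400c000, leaving all other bits untouched
	 */
	ret = cmdq_rec_write_mask(rec, 0x1, 0x1400c000, 0x00000001);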
> +
> +/**
> + * cmdq_rec_wait() - append wait command to the command queue record
> + * @handle: the command queue record handle
> + * @event: the desired event type to "wait and CLEAR"
> + *
> + * Return: 0 for success; else the error code is returned
> + */
> +int cmdq_rec_wait(struct cmdq_rec *handle, enum cmdq_event event);
> +
> +/**
> + * cmdq_rec_clear_event() - append clear event command to the command queue
> + * record
> + * @handle: the command queue record handle
> + * @event: the desired event to be cleared
> + *
> + * Return: 0 for success; else the error code is returned
> + */
> +int cmdq_rec_clear_event(struct cmdq_rec *handle, enum cmdq_event event);
> +
> +/**
> + * cmdq_rec_flush() - trigger CMDQ to execute the recorded commands
> + * @handle: the command queue record handle
> + *
> + * Return: 0 for success; else the error code is returned
> + *
> + * Trigger CMDQ to execute the recorded commands. Note that this is a
> + * synchronous flush function: when the function returns, the recorded
> + * commands have completed.
> + */
> +int cmdq_rec_flush(struct cmdq_rec *handle);
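For readers new to the API, a minimal synchronous flow might look like the
sketch below; the engine flag, event and register address are arbitrary
placeholders picked for illustration, not a recommended configuration:

	struct cmdq_rec *rec;
	int ret;

	ret = cmdq_rec_create(dev, BIT_ULL(CMDQ_ENG_DISP_OVL0), &rec);
	if (ret)
		return ret;

	cmdq_rec_clear_event(rec, CMDQ_EVENT_MUTEX0_STREAM_EOF);
	cmdq_rec_write(rec, 0x1, 0x1400c000);	/* hypothetical address */
	cmdq_rec_wait(rec, CMDQ_EVENT_MUTEX0_STREAM_EOF);

	/* blocks until the recorded commands have completed */
	ret = cmdq_rec_flush(rec);

	cmdq_rec_destroy(rec);
	return ret;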
> +
> +/**
> + * cmdq_rec_flush_async() - trigger CMDQ to asynchronously execute the
> + * recorded commands
> + * @handle: the command queue record handle
> + *
> + * Return: 0 for successfully start execution; else the error code is returned
> + *
> + * Trigger CMDQ to asynchronously execute the recorded commands. Note that this
> + * is an ASYNC function: when the function returns, the commands may or may not
> + * have finished. There is no way to retrieve the result.
> + */
> +int cmdq_rec_flush_async(struct cmdq_rec *handle);
> +
> +/**
> + * cmdq_rec_flush_async_callback() - trigger CMDQ to asynchronously execute
> + * the recorded commands and call back after
> + * ISR is finished and this flush is finished
> + * @handle: the command queue record handle
> + * @isr_cb: called by ISR in the end of CMDQ ISR
> + * @isr_data: this data will pass back to isr_cb
> + * @done_cb: called after flush is done
> + * @done_data: this data will pass back to done_cb
> + *
> + * Return: 0 for success; else the error code is returned
> + *
> + * Trigger CMDQ to asynchronously execute the recorded commands and call back
> + * after the ISR has finished and this flush is done. Note that this is an
> + * ASYNC function: when the function returns, the commands may or may not have
> + * finished. The ISR callback is called at the end of the CMDQ ISR, and the
> + * done callback is called after all commands are done.
> + */
> +int cmdq_rec_flush_async_callback(struct cmdq_rec *handle,
> + cmdq_async_flush_cb isr_cb,
> + void *isr_data,
> + cmdq_async_flush_cb done_cb,
> + void *done_data);
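An asynchronous caller can pair the done callback with a completion; a sketch,
where my_done_cb and frame_done are hypothetical names:

	static DECLARE_COMPLETION(frame_done);

	static int my_done_cb(void *data)
	{
		complete(data);		/* data is &frame_done, see below */
		return 0;
	}

	...
	ret = cmdq_rec_flush_async_callback(rec, NULL, NULL,
					    my_done_cb, &frame_done);
	if (!ret)
		wait_for_completion(&frame_done);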
> +
> +/**
> + * cmdq_rec_destroy() - destroy command queue record handle
> + * @handle: the command queue record handle
> + */
> +void cmdq_rec_destroy(struct cmdq_rec *handle);
> +
> +#endif /* __MTK_CMDQ_H__ */
* [RFC 02/11] drm/bridge: GPIO-controlled display multiplexer driver
[not found] ` <1506690553-27357-1-git-send-email-ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-09-29 13:09 ` [RFC 01/11] soc: mediatek: MediaTek Command Queue (CMDQ) driver Ulrich Hecht
@ 2017-09-29 13:09 ` Ulrich Hecht
2017-09-29 13:09 ` [RFC 03/11] platform/chrome: ChromeOS firmware interface driver Ulrich Hecht
` (4 subsequent siblings)
6 siblings, 0 replies; 17+ messages in thread
From: Ulrich Hecht @ 2017-09-29 13:09 UTC (permalink / raw)
To: magnus.damm-Re5JQEeQqe8AvxtiuMwx3w,
laurent.pinchart-ryLnwIuWjnjg/C1BVhZhaw,
jacopo-AW8dsiIh9cEdnm+yROfE0A
Cc: Ulrich Hecht, linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r
Ported from chromeos-3.18 kernel.
Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
drivers/gpu/drm/bridge/Kconfig | 11 ++
drivers/gpu/drm/bridge/Makefile | 1 +
drivers/gpu/drm/bridge/generic-gpio-mux.c | 316 ++++++++++++++++++++++++++++++
3 files changed, 328 insertions(+)
create mode 100644 drivers/gpu/drm/bridge/generic-gpio-mux.c
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index adf9ae0..966f4eb 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -15,6 +15,17 @@ config DRM_PANEL_BRIDGE
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
+config DRM_GENERIC_GPIO_MUX
+ tristate "Generic GPIO-controlled mux"
+ depends on DRM
+ depends on OF
+ select DRM_KMS_HELPER
+ ---help---
+ This bridge driver models a GPIO-controlled display mux with one
+ input, 2 outputs (e.g. an HDMI mux). The hardware decides which output
+ is active, reports it as a GPIO, and the driver redirects calls to the
+ appropriate downstream bridge (if any).
+
config DRM_ANALOGIX_ANX78XX
tristate "Analogix ANX78XX bridge"
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index defcf1e..2d5652e 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
+obj-$(CONFIG_DRM_GENERIC_GPIO_MUX) += generic-gpio-mux.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
diff --git a/drivers/gpu/drm/bridge/generic-gpio-mux.c b/drivers/gpu/drm/bridge/generic-gpio-mux.c
new file mode 100644
index 0000000..744804b
--- /dev/null
+++ b/drivers/gpu/drm/bridge/generic-gpio-mux.c
@@ -0,0 +1,316 @@
+/*
+ * Generic GPIO-controlled display multiplexer driver
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+struct gpio_display_mux {
+ struct device *dev;
+
+ struct gpio_desc *gpiod_detect;
+ int detect_irq;
+
+ struct drm_bridge bridge;
+
+ struct drm_bridge *next[2];
+};
+
+static inline struct gpio_display_mux *bridge_to_gpio_display_mux(
+ struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct gpio_display_mux, bridge);
+}
+
+static irqreturn_t gpio_display_mux_det_threaded_handler(int unused, void *data)
+{
+ struct gpio_display_mux *gpio_display_mux = data;
+ int active = gpiod_get_value(gpio_display_mux->gpiod_detect);
+
+ dev_dbg(gpio_display_mux->dev, "detect GPIO now %d\n", active);
+
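+ /* report the mux flip as a hotplug event so userspace re-probes outputs */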
+ if (gpio_display_mux->bridge.dev)
+ drm_kms_helper_hotplug_event(gpio_display_mux->bridge.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int gpio_display_mux_attach(struct drm_bridge *bridge)
+{
+ struct gpio_display_mux *gpio_display_mux =
+ bridge_to_gpio_display_mux(bridge);
+ struct drm_bridge *next;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_display_mux->next); i++) {
+ next = gpio_display_mux->next[i];
+ if (next)
+ next->encoder = bridge->encoder;
+ }
+
+ return 0;
+}
+
+static bool gpio_display_mux_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct gpio_display_mux *gpio_display_mux =
+ bridge_to_gpio_display_mux(bridge);
+ int active;
+ struct drm_bridge *next;
+
+ active = gpiod_get_value(gpio_display_mux->gpiod_detect);
+ next = gpio_display_mux->next[active];
+
+ if (next && next->funcs->mode_fixup)
+ return next->funcs->mode_fixup(next, mode, adjusted_mode);
+ else
+ return true;
+}
+
+static void gpio_display_mux_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct gpio_display_mux *gpio_display_mux =
+ bridge_to_gpio_display_mux(bridge);
+ int active;
+ struct drm_bridge *next;
+
+ active = gpiod_get_value(gpio_display_mux->gpiod_detect);
+ next = gpio_display_mux->next[active];
+
+ if (next && next->funcs->mode_set)
+ next->funcs->mode_set(next, mode, adjusted_mode);
+}
+
+/*
+ * Since this driver _reacts_ to mux changes, we need to make sure all
+ * downstream bridges are pre-enabled.
+ */
+static void gpio_display_mux_pre_enable(struct drm_bridge *bridge)
+{
+ struct gpio_display_mux *gpio_display_mux =
+ bridge_to_gpio_display_mux(bridge);
+ struct drm_bridge *next;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_display_mux->next); i++) {
+ next = gpio_display_mux->next[i];
+ if (next && next->funcs->pre_enable)
+ next->funcs->pre_enable(next);
+ }
+}
+
+static void gpio_display_mux_post_disable(struct drm_bridge *bridge)
+{
+ struct gpio_display_mux *gpio_display_mux =
+ bridge_to_gpio_display_mux(bridge);
+ struct drm_bridge *next;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_display_mux->next); i++) {
+ next = gpio_display_mux->next[i];
+ if (next && next->funcs->post_disable)
+ next->funcs->post_disable(next);
+ }
+}
+
+/*
+ * In an ideal mux driver, only the currently selected bridge should be enabled.
+ * For the sake of simplicity, we just enable/disable all downstream
+ * bridges at the same time.
+ */
+static void gpio_display_mux_enable(struct drm_bridge *bridge)
+{
+ struct gpio_display_mux *gpio_display_mux =
+ bridge_to_gpio_display_mux(bridge);
+ struct drm_bridge *next;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_display_mux->next); i++) {
+ next = gpio_display_mux->next[i];
+ if (next && next->funcs->enable)
+ next->funcs->enable(next);
+ }
+}
+
+static void gpio_display_mux_disable(struct drm_bridge *bridge)
+{
+ struct gpio_display_mux *gpio_display_mux =
+ bridge_to_gpio_display_mux(bridge);
+ struct drm_bridge *next;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_display_mux->next); i++) {
+ next = gpio_display_mux->next[i];
+ if (next && next->funcs->disable)
+ next->funcs->disable(next);
+ }
+}
+
+static const struct drm_bridge_funcs gpio_display_mux_bridge_funcs = {
+ .attach = gpio_display_mux_attach,
+ .mode_fixup = gpio_display_mux_mode_fixup,
+ .disable = gpio_display_mux_disable,
+ .post_disable = gpio_display_mux_post_disable,
+ .mode_set = gpio_display_mux_mode_set,
+ .pre_enable = gpio_display_mux_pre_enable,
+ .enable = gpio_display_mux_enable,
+};
+
+static int gpio_display_mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_display_mux *gpio_display_mux;
+ struct device_node *port, *ep, *remote;
+ int ret;
+ u32 reg;
+
+ gpio_display_mux = devm_kzalloc(dev, sizeof(*gpio_display_mux),
+ GFP_KERNEL);
+ if (!gpio_display_mux)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gpio_display_mux);
+ gpio_display_mux->dev = &pdev->dev;
+
+ gpio_display_mux->bridge.of_node = dev->of_node;
+
+ gpio_display_mux->gpiod_detect =
+ devm_gpiod_get(dev, "detect", GPIOD_IN);
+ if (IS_ERR(gpio_display_mux->gpiod_detect))
+ return PTR_ERR(gpio_display_mux->gpiod_detect);
+
+ gpio_display_mux->detect_irq =
+ gpiod_to_irq(gpio_display_mux->gpiod_detect);
+ if (gpio_display_mux->detect_irq < 0) {
+ dev_err(dev, "Failed to get output irq %d\n",
+ gpio_display_mux->detect_irq);
+ return -ENODEV;
+ }
+
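+ /*
+ * Assumed OF graph layout (this patch adds no DT binding document):
+ * port@1 holds one endpoint per output, and each endpoint's "reg"
+ * property (0 or 1) matches the detect GPIO level that selects it.
+ */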
+ port = of_graph_get_port_by_id(dev->of_node, 1);
+ if (!port) {
+ dev_err(dev, "Missing output port node\n");
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(port, ep) {
+ /* for_each_child_of_node() manages ep's refcount across iterations */
+ if (!ep->name || (of_node_cmp(ep->name, "endpoint") != 0))
+ continue;
+
+ if (of_property_read_u32(ep, "reg", &reg) < 0 ||
+ reg >= ARRAY_SIZE(gpio_display_mux->next)) {
+ dev_err(dev,
+ "Missing/invalid reg property for endpoint %s\n",
+ ep->full_name);
+ of_node_put(ep);
+ of_node_put(port);
+ return -EINVAL;
+ }
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_err(dev,
+ "Missing connector/bridge node for endpoint %s\n",
+ ep->full_name);
+ of_node_put(ep);
+ of_node_put(port);
+ return -EINVAL;
+ }
+ of_node_put(ep);
+
+ if (of_device_is_compatible(remote, "hdmi-connector")) {
+ of_node_put(remote);
+ continue;
+ }
+
+ gpio_display_mux->next[reg] = of_drm_find_bridge(remote);
+ if (!gpio_display_mux->next[reg]) {
+ dev_err(dev, "Waiting for external bridge %s\n",
+ remote->name);
+ of_node_put(remote);
+ of_node_put(port);
+ return -EPROBE_DEFER;
+ }
+
+ of_node_put(remote);
+ }
+ of_node_put(port);
+
+ gpio_display_mux->bridge.funcs = &gpio_display_mux_bridge_funcs;
+ ret = drm_bridge_add(&gpio_display_mux->bridge);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add drm bridge\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, gpio_display_mux->detect_irq,
+ NULL,
+ gpio_display_mux_det_threaded_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "gpio-display-mux-det", gpio_display_mux);
+ if (ret) {
+ dev_err(dev, "Failed to request MUX_DET threaded irq\n");
+ goto err_bridge_remove;
+ }
+
+ return 0;
+
+err_bridge_remove:
+ drm_bridge_remove(&gpio_display_mux->bridge);
+
+ return ret;
+}
+
+static int gpio_display_mux_remove(struct platform_device *pdev)
+{
+ struct gpio_display_mux *gpio_display_mux = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&gpio_display_mux->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id gpio_display_mux_match[] = {
+ { .compatible = "gpio-display-mux", },
+ {},
+};
+
+struct platform_driver gpio_display_mux_driver = {
+ .probe = gpio_display_mux_probe,
+ .remove = gpio_display_mux_remove,
+ .driver = {
+ .name = "gpio-display-mux",
+ .of_match_table = gpio_display_mux_match,
+ },
+};
+
+module_platform_driver(gpio_display_mux_driver);
+
+MODULE_DESCRIPTION("GPIO-controlled display mux");
+MODULE_AUTHOR("Nicolas Boichat <drinkcat-F7+t8E8rja9g9hUCZPvPmw@public.gmane.org>");
+MODULE_LICENSE("GPL v2");
--
2.7.4
* [RFC 03/11] platform/chrome: ChromeOS firmware interface driver
[not found] ` <1506690553-27357-1-git-send-email-ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-09-29 13:09 ` [RFC 01/11] soc: mediatek: MediaTek Command Queue (CMDQ) driver Ulrich Hecht
2017-09-29 13:09 ` [RFC 02/11] drm/bridge: GPIO-controlled display multiplexer driver Ulrich Hecht
@ 2017-09-29 13:09 ` Ulrich Hecht
2017-09-29 13:09 ` [RFC 04/11] drm/bridge: Parade PS8640 MIPI DSI -> eDP converter driver Ulrich Hecht
` (3 subsequent siblings)
6 siblings, 0 replies; 17+ messages in thread
From: Ulrich Hecht @ 2017-09-29 13:09 UTC (permalink / raw)
To: magnus.damm-Re5JQEeQqe8AvxtiuMwx3w,
laurent.pinchart-ryLnwIuWjnjg/C1BVhZhaw,
jacopo-AW8dsiIh9cEdnm+yROfE0A
Cc: Ulrich Hecht, linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r
Ported from chromeos-3.18 kernel.
Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
drivers/platform/chrome/Kconfig | 18 +++
drivers/platform/chrome/Makefile | 2 +
drivers/platform/chrome/chromeos.c | 120 +++++++++++++++
drivers/platform/chrome/chromeos.h | 61 ++++++++
drivers/platform/chrome/chromeos_arm.c | 264 +++++++++++++++++++++++++++++++++
drivers/platform/chrome/elog.h | 186 +++++++++++++++++++++++
include/linux/chromeos_platform.h | 27 ++++
7 files changed, 678 insertions(+)
create mode 100644 drivers/platform/chrome/chromeos.c
create mode 100644 drivers/platform/chrome/chromeos.h
create mode 100644 drivers/platform/chrome/chromeos_arm.c
create mode 100644 drivers/platform/chrome/elog.h
create mode 100644 include/linux/chromeos_platform.h
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 0ad6e29..f8b1876 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -14,6 +14,24 @@ menuconfig CHROME_PLATFORMS
if CHROME_PLATFORMS
+config CHROMEOS
+ bool
+ depends on CHROMEOS_OF_FIRMWARE || (NVRAM && ACPI_CHROMEOS)
+
+ ---help---
+ Provides abstracted interfaces to the firmware features provided on
+ ChromeOS devices. It depends on a low-level driver to implement the
+ firmware interface on the platform.
+
+config CHROMEOS_OF_FIRMWARE
+ bool "ChromeOS firmware interface driver"
+ depends on OF
+ select CHROMEOS
+ ---help---
+ This driver provides an interface to ChromeOS firmware.
+
+ Say Y here if you are building for a ChromeOS device.
+
config CHROMEOS_LAPTOP
tristate "Chrome OS Laptop"
depends on I2C && DMI && X86
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 66c345c..f10a7b6 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,4 +1,6 @@
+obj-$(CONFIG_CHROMEOS) += chromeos.o
+obj-$(CONFIG_CHROMEOS_OF_FIRMWARE) += chromeos_arm.o
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
diff --git a/drivers/platform/chrome/chromeos.c b/drivers/platform/chrome/chromeos.c
new file mode 100644
index 0000000..8b01630
--- /dev/null
+++ b/drivers/platform/chrome/chromeos.c
@@ -0,0 +1,120 @@
+/*
+ * ChromeOS platform support code. Glue layer between higher level functions
+ * and per-platform firmware interfaces.
+ *
+ * Copyright (C) 2010 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/chromeos_platform.h>
+#include <linux/module.h>
+#include "chromeos.h"
+
+static struct chromeos_vbc *chromeos_vbc_ptr;
+
+static int vbc_read(u8 *buf, int buf_size);
+static int vbc_write_byte(unsigned offset, u8 value);
+
+/* the following defines are copied from
+ * vboot_reference:firmware/lib/vboot_nvstorage.c.
+ */
+#define RECOVERY_OFFSET 2
+#define VBNV_RECOVERY_RW_INVALID_OS 0x43
+
+int chromeos_set_need_recovery(void)
+{
+ if (!chromeos_legacy_set_need_recovery())
+ return 0;
+
+ return vbc_write_byte(RECOVERY_OFFSET, VBNV_RECOVERY_RW_INVALID_OS);
+}
+EXPORT_SYMBOL(chromeos_set_need_recovery);
+
+/*
+ * Lifted from vboot_reference:firmware/lib/vboot_nvstorage.c and formatted.
+ *
+ * Return CRC-8 of the data, using x^8 + x^2 + x + 1 polynomial. A table-based
+ * algorithm would be faster, but for only 15 bytes isn't worth the code size.
+ */
+static u8 crc8(const u8 *data, int len)
+{
+ unsigned crc = 0;
+ int i, j;
+
+ for (j = len; j; j--, data++) {
+ crc ^= (*data << 8);
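+ /* 0x1070 << 3 is the polynomial 0x107 (x^8+x^2+x+1) aligned to bit 15 */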
+ for (i = 8; i; i--) {
+ if (crc & 0x8000)
+ crc ^= (0x1070 << 3);
+ crc <<= 1;
+ }
+ }
+ return (u8)(crc >> 8);
+}
+
+static int vbc_write_byte(unsigned offset, u8 value)
+{
+ u8 buf[MAX_VBOOT_CONTEXT_BUFFER_SIZE];
+ ssize_t size;
+
+ if (!chromeos_vbc_ptr)
+ return -ENOSYS;
+
+ size = vbc_read(buf, sizeof(buf));
+ if (size <= 0)
+ return -EINVAL;
+
+ if (offset >= (size - 1))
+ return -EINVAL;
+
+ if (buf[offset] == value)
+ return 0;
+
+ buf[offset] = value;
+ buf[size - 1] = crc8(buf, size - 1);
+
+ return chromeos_vbc_ptr->write(buf, size);
+}
+
+/*
+ * Read vboot context and verify it. If everything checks out, return number
+ * of bytes in the vboot context buffer, or a negative value on any error
+ * (uninitialized subsystem, corrupted crc8 value, short read, etc.).
+ */
+static int vbc_read(u8 *buf, int buf_size)
+{
+ ssize_t size;
+
+ if (!chromeos_vbc_ptr)
+ return -ENOSYS;
+
+ size = chromeos_vbc_ptr->read(buf, buf_size);
+ if (size <= 0)
+ return -1;
+
+ if (buf[size - 1] != crc8(buf, size - 1)) {
+ pr_err("%s: vboot context contents corrupted\n", __func__);
+ return -1;
+ }
+ return size;
+}
+
+int chromeos_vbc_register(struct chromeos_vbc *chromeos_vbc)
+{
+ chromeos_vbc_ptr = chromeos_vbc;
+ return 0;
+}
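A quick way to see the CRC scheme end to end: a minimal, self-contained
userspace sketch (not part of the patch; the 16-byte context size is a
stand-in, the real size comes from the firmware) of what vbc_write_byte()
does, namely patch one byte and refresh the trailing CRC:

#include <stdint.h>
#include <stdio.h>

/* Same bitwise CRC-8 (polynomial x^8 + x^2 + x + 1) as the driver's crc8(). */
static uint8_t crc8(const uint8_t *data, int len)
{
	unsigned int crc = 0;
	int i, j;

	for (j = len; j; j--, data++) {
		crc ^= (*data << 8);
		for (i = 8; i; i--) {
			if (crc & 0x8000)
				crc ^= (0x1070 << 3);
			crc <<= 1;
		}
	}
	return (uint8_t)(crc >> 8);
}

int main(void)
{
	uint8_t vbc[16] = { 0 };	/* hypothetical context: 15 payload bytes + CRC */

	vbc[15] = crc8(vbc, 15);
	printf("initial CRC: 0x%02x\n", vbc[15]);

	/* what vbc_write_byte() does: patch one byte, refresh the CRC */
	vbc[2] = 0x43;			/* RECOVERY_OFFSET := VBNV_RECOVERY_RW_INVALID_OS */
	vbc[15] = crc8(vbc, 15);
	printf("patched CRC: 0x%02x\n", vbc[15]);
	return 0;
}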
diff --git a/drivers/platform/chrome/chromeos.h b/drivers/platform/chrome/chromeos.h
new file mode 100644
index 0000000..8f4384a
--- /dev/null
+++ b/drivers/platform/chrome/chromeos.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _DRIVERS_PLATFORM_CHROMEOS_H
+#define _DRIVERS_PLATFORM_CHROMEOS_H
+
+#define MAX_VBOOT_CONTEXT_BUFFER_SIZE 64 /* Should be enough for anything. */
+
+#ifdef CONFIG_ACPI_CHROMEOS
+extern int chromeos_legacy_set_need_recovery(void);
+#else
+static inline int chromeos_legacy_set_need_recovery(void) { return -ENODEV; }
+#endif
+
+struct chromeos_vbc {
+ /**
+ * Read vboot context to buffer
+ *
+ * @param buf Pointer to buffer for storing vboot context
+ * @param count Size of buffer
+ * @return on success, the number of bytes read is returned and
+ * on error, -err is returned.
+ */
+ ssize_t (*read)(void *buf, size_t count);
+
+ /**
+ * Write vboot context from buffer
+ *
+ * @param buf Pointer to buffer of new vboot context content
+ * @param count Size of buffer
+ * @return on success, the number of bytes written is returned and
+ * on error, -err is returned.
+ */
+ ssize_t (*write)(const void *buf, size_t count);
+
+ const char *name;
+};
+
+/**
+ * Register chromeos_vbc callbacks.
+ *
+ * @param chromeos_vbc Pointer to struct holding callbacks
+ * @return on success, return 0, on error, -err is returned.
+ */
+int chromeos_vbc_register(struct chromeos_vbc *chromeos_vbc);
+
+#endif /* _DRIVERS_PLATFORM_CHROMEOS_H */
diff --git a/drivers/platform/chrome/chromeos_arm.c b/drivers/platform/chrome/chromeos_arm.c
new file mode 100644
index 0000000..337e24d
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_arm.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2011 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) "chromeos_arm: " fmt
+
+#include <linux/bcd.h>
+#include <linux/gpio.h>
+#include <linux/notifier.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "chromeos.h"
+#include "elog.h"
+
+struct chromeos_arm_elog_panic_buffer {
+ uint32_t start;
+ uint32_t size;
+ void __iomem *virt_addr;
+ struct notifier_block nb;
+};
+
+/*
+ * Update the checksum at the last byte
+ */
+static void elog_update_checksum(struct event_header *event, u8 checksum)
+{
+ u8 *event_data = (u8 *)event;
+ event_data[event->length - 1] = checksum;
+}
+
+/*
+ * Simple byte checksum for events
+ */
+static u8 elog_checksum_event(struct event_header *event)
+{
+ u8 index, checksum = 0;
+ u8 *data = (u8 *)event;
+
+ for (index = 0; index < event->length; index++)
+ checksum += data[index];
+ return checksum;
+}
+
+/*
+ * Populate timestamp in event header with current time
+ */
+static void elog_fill_timestamp(struct event_header *event)
+{
+ struct timeval timeval;
+ struct tm time;
+
+ do_gettimeofday(&timeval);
+ time_to_tm(timeval.tv_sec, 0, &time);
+
+ event->second = bin2bcd(time.tm_sec);
+ event->minute = bin2bcd(time.tm_min);
+ event->hour = bin2bcd(time.tm_hour);
+ event->day = bin2bcd(time.tm_mday);
+ event->month = bin2bcd(time.tm_mon + 1);
+ event->year = bin2bcd(time.tm_year % 100);
+}
+
+/*
+ * Fill out an event structure with space for the data and checksum.
+ */
+void elog_prepare_event(struct event_header *event, u8 event_type, void *data,
+ u8 data_size)
+{
+ event->type = event_type;
+ event->length = sizeof(*event) + data_size + 1;
+ elog_fill_timestamp(event);
+
+ if (data_size)
+ memcpy(&event[1], data, data_size);
+
+ /* Zero the checksum byte and then compute checksum */
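+ /* the stored value makes the byte sum of the whole event 0 mod 256 */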
+ elog_update_checksum(event, 0);
+ elog_update_checksum(event, -(elog_checksum_event(event)));
+}
+
+static int chromeos_arm_elog_panic(struct notifier_block *this,
+ unsigned long p_event, void *ptr)
+{
+ struct chromeos_arm_elog_panic_buffer *buf;
+ uint32_t reason = ELOG_SHUTDOWN_PANIC;
+ const u8 data_size = sizeof(reason);
+ union {
+ struct event_header hdr;
+ u8 bytes[sizeof(struct event_header) + data_size + 1];
+ } event;
+
+ buf = container_of(this, struct chromeos_arm_elog_panic_buffer, nb);
+ elog_prepare_event(&event.hdr, ELOG_TYPE_OS_EVENT, &reason, data_size);
+ memcpy_toio(buf->virt_addr, event.bytes, sizeof(event.bytes));
+
+ return NOTIFY_DONE;
+}
+
+static int chromeos_arm_panic_init(struct platform_device *pdev, u32 start,
+ u32 size)
+{
+ int ret = -EINVAL;
+ struct chromeos_arm_elog_panic_buffer *buf;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ dev_err(&pdev->dev, "failed to allocate panic notifier.\n");
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ buf->start = start;
+ buf->size = size;
+ buf->nb.notifier_call = chromeos_arm_elog_panic;
+
+ if (!request_mem_region(start, size, "elog panic event")) {
+ dev_err(&pdev->dev, "failed to request panic event buffer.\n");
+ goto fail2;
+ }
+
+ buf->virt_addr = ioremap(start, size);
+ if (!buf->virt_addr) {
+ dev_err(&pdev->dev, "failed to map panic event buffer.\n");
+ goto fail3;
+ }
+
+ atomic_notifier_chain_register(&panic_notifier_list, &buf->nb);
+
+ platform_set_drvdata(pdev, buf);
+
+ return 0;
+
+fail3:
+ release_mem_region(start, size);
+fail2:
+ kfree(buf);
+fail1:
+ return ret;
+}
+
+static int dt_gpio_init(struct platform_device *pdev, const char *of_list_name,
+ const char *gpio_desc_name, const char *sysfs_name)
+{
+ int gpio, err;
+ struct device_node *np = pdev->dev.of_node;
+
+ gpio = of_get_named_gpio(np, of_list_name, 0);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(&pdev->dev, "invalid %s descriptor\n", of_list_name);
+ return -EINVAL;
+ }
+
+ err = gpio_request_one(gpio, GPIOF_DIR_IN, gpio_desc_name);
+ if (err)
+ return err;
+
+ gpio_export(gpio, 0);
+ gpio_export_link(&pdev->dev, sysfs_name, gpio);
+ return 0;
+}
+
+static int chromeos_arm_probe(struct platform_device *pdev)
+{
+ int err;
+ u32 elog_panic_event[2];
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ err = dt_gpio_init(pdev, "write-protect-gpio",
+ "firmware-write-protect", "write-protect");
+ if (err)
+ goto err;
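+ /* recovery/developer switches may be absent; failures are not fatal */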
+ err = dt_gpio_init(pdev, "recovery-switch",
+ "firmware-recovery-switch", "recovery-switch");
+ err = dt_gpio_init(pdev, "developer-switch",
+ "firmware-developer-switch", "developer-switch");
+
+ if (!of_property_read_u32_array(np, "elog-panic-event",
+ elog_panic_event,
+ ARRAY_SIZE(elog_panic_event))) {
+ err = chromeos_arm_panic_init(pdev, elog_panic_event[0],
+ elog_panic_event[1]);
+ if (err)
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "chromeos system detected\n");
+
+ err = 0;
+err:
+ of_node_put(np);
+
+ return err;
+}
+
+static int chromeos_arm_remove(struct platform_device *pdev)
+{
+ struct chromeos_arm_elog_panic_buffer *buf;
+
+ buf = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+ if (buf) {
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &buf->nb);
+ release_mem_region(buf->start, buf->size);
+ iounmap(buf->virt_addr);
+ kfree(buf);
+ }
+ return 0;
+}
+
+static struct platform_driver chromeos_arm_driver = {
+ .probe = chromeos_arm_probe,
+ .remove = chromeos_arm_remove,
+ .driver = {
+ .name = "chromeos_arm",
+ },
+};
+
+static int __init chromeos_arm_init(void)
+{
+ struct device_node *fw_dn;
+ struct platform_device *pdev;
+
+ fw_dn = of_find_compatible_node(NULL, NULL, "chromeos-firmware");
+ if (!fw_dn)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("chromeos_arm", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ of_node_put(fw_dn);
+ return PTR_ERR(pdev);
+ }
+ pdev->dev.of_node = fw_dn;
+
+ return platform_driver_register(&chromeos_arm_driver);
+}
+subsys_initcall(chromeos_arm_init);
diff --git a/drivers/platform/chrome/elog.h b/drivers/platform/chrome/elog.h
new file mode 100644
index 0000000..84b005f
--- /dev/null
+++ b/drivers/platform/chrome/elog.h
@@ -0,0 +1,186 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2012 The ChromiumOS Authors. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#ifndef ELOG_H_
+#define ELOG_H_
+
+/* SMI command code for GSMI event logging */
+#define ELOG_GSMI_APM_CNT 0xEF
+
+#define MAX_EVENT_SIZE 0x7F
+
+/* End of log */
+#define ELOG_TYPE_EOL 0xFF
+
+/*
+ * Standard SMBIOS event log types below 0x80
+ */
+#define ELOG_TYPE_UNDEFINED_EVENT 0x00
+#define ELOG_TYPE_SINGLE_BIT_ECC_MEM_ERR 0x01
+#define ELOG_TYPE_MULTI_BIT_ECC_MEM_ERR 0x02
+#define ELOG_TYPE_MEM_PARITY_ERR 0x03
+#define ELOG_TYPE_BUS_TIMEOUT 0x04
+#define ELOG_TYPE_IO_CHECK 0x05
+#define ELOG_TYPE_SW_NMI 0x06
+#define ELOG_TYPE_POST_MEM_RESIZE 0x07
+#define ELOG_TYPE_POST_ERR 0x08
+#define ELOG_TYPE_PCI_PERR 0x09
+#define ELOG_TYPE_PCI_SERR 0x0A
+#define ELOG_TYPE_CPU_FAIL 0x0B
+#define ELOG_TYPE_EISA_TIMEOUT 0x0C
+#define ELOG_TYPE_CORRECTABLE_MEMLOG_DIS 0x0D
+#define ELOG_TYPE_LOG_DISABLED 0x0E
+#define ELOG_TYPE_UNDEFINED_EVENT2 0x0F
+#define ELOG_TYPE_SYS_LIMIT_EXCEED 0x10
+#define ELOG_TYPE_ASYNC_HW_TIMER_EXPIRED 0x11
+#define ELOG_TYPE_SYS_CONFIG_INFO 0x12
+#define ELOG_TYPE_HDD_INFO 0x13
+#define ELOG_TYPE_SYS_RECONFIG 0x14
+#define ELOG_TYPE_CPU_ERROR 0x15
+#define ELOG_TYPE_LOG_CLEAR 0x16
+#define ELOG_TYPE_BOOT 0x17
+
+/*
+ * Extended defined OEM event types start at 0x80
+ */
+
+/* OS/kernel events */
+#define ELOG_TYPE_OS_EVENT 0x81
+#define ELOG_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
+#define ELOG_SHUTDOWN_NMIWDT 1 /* NMI Watchdog */
+#define ELOG_SHUTDOWN_PANIC 2 /* Panic */
+#define ELOG_SHUTDOWN_OOPS 3 /* Oops */
+#define ELOG_SHUTDOWN_DIE 4 /* Die - No longer meaningful */
+#define ELOG_SHUTDOWN_MCE 5 /* Machine Check */
+#define ELOG_SHUTDOWN_SOFTWDT 6 /* Software Watchdog */
+#define ELOG_SHUTDOWN_MBE 7 /* Uncorrected ECC */
+#define ELOG_SHUTDOWN_TRIPLE 8 /* Triple Fault */
+#define ELOG_SHUTDOWN_THERMAL 9 /* Critical Thermal Threshold */
+
+/* Last event from coreboot */
+#define ELOG_TYPE_OS_BOOT 0x90
+
+/* Embedded controller event */
+#define ELOG_TYPE_EC_EVENT 0x91
+#define EC_EVENT_LID_CLOSED 0x01
+#define EC_EVENT_LID_OPEN 0x02
+#define EC_EVENT_POWER_BUTTON 0x03
+#define EC_EVENT_AC_CONNECTED 0x04
+#define EC_EVENT_AC_DISCONNECTED 0x05
+#define EC_EVENT_BATTERY_LOW 0x06
+#define EC_EVENT_BATTERY_CRITICAL 0x07
+#define EC_EVENT_BATTERY 0x08
+#define EC_EVENT_THERMAL_THRESHOLD 0x09
+#define EC_EVENT_THERMAL_OVERLOAD 0x0a
+#define EC_EVENT_THERMAL 0x0b
+#define EC_EVENT_USB_CHARGER 0x0c
+#define EC_EVENT_KEY_PRESSED 0x0d
+#define EC_EVENT_INTERFACE_READY 0x0e
+#define EC_EVENT_KEYBOARD_RECOVERY 0x0f
+#define EC_EVENT_THERMAL_SHUTDOWN 0x10
+#define EC_EVENT_BATTERY_SHUTDOWN 0x11
+#define EC_EVENT_FAN_ERROR 0x12
+
+/* Power */
+#define ELOG_TYPE_POWER_FAIL 0x92
+#define ELOG_TYPE_SUS_POWER_FAIL 0x93
+#define ELOG_TYPE_PWROK_FAIL 0x94
+#define ELOG_TYPE_SYS_PWROK_FAIL 0x95
+#define ELOG_TYPE_POWER_ON 0x96
+#define ELOG_TYPE_POWER_BUTTON 0x97
+#define ELOG_TYPE_POWER_BUTTON_OVERRIDE 0x98
+
+/* Reset */
+#define ELOG_TYPE_RESET_BUTTON 0x99
+#define ELOG_TYPE_SYSTEM_RESET 0x9a
+#define ELOG_TYPE_RTC_RESET 0x9b
+#define ELOG_TYPE_TCO_RESET 0x9c
+
+/* Sleep/Wake */
+#define ELOG_TYPE_ACPI_ENTER 0x9d
+#define ELOG_TYPE_ACPI_WAKE 0x9e
+#define ELOG_TYPE_WAKE_SOURCE 0x9f
+#define ELOG_WAKE_SOURCE_PCIE 0x00
+#define ELOG_WAKE_SOURCE_PME 0x01
+#define ELOG_WAKE_SOURCE_PME_INTERNAL 0x02
+#define ELOG_WAKE_SOURCE_RTC 0x03
+#define ELOG_WAKE_SOURCE_GPIO 0x04
+#define ELOG_WAKE_SOURCE_SMBUS 0x05
+#define ELOG_WAKE_SOURCE_PWRBTN 0x06
+struct elog_event_data_wake {
+ u8 source;
+ u32 instance;
+} __packed;
+
+/* Chrome OS related events */
+#define ELOG_TYPE_CROS_DEVELOPER_MODE 0xa0
+#define ELOG_TYPE_CROS_RECOVERY_MODE 0xa1
+#define ELOG_CROS_RECOVERY_MODE_BUTTON 0x02
+
+/* Management Engine Events */
+#define ELOG_TYPE_MANAGEMENT_ENGINE 0xa2
+#define ELOG_TYPE_MANAGEMENT_ENGINE_EXT 0xa4
+struct elog_event_data_me_extended {
+ u8 current_working_state;
+ u8 operation_state;
+ u8 operation_mode;
+ u8 error_code;
+ u8 progress_code;
+ u8 current_pmevent;
+ u8 current_state;
+} __packed;
+
+/* Last post code from previous boot */
+#define ELOG_TYPE_LAST_POST_CODE 0xa3
+
+/* EC Shutdown Reason */
+#define ELOG_TYPE_EC_SHUTDOWN 0xa5
+
+/* ARM/generic versions of sleep/wake */
+#define ELOG_TYPE_SLEEP 0xa7
+#define ELOG_TYPE_WAKE 0xa8
+
+/* ELOG header */
+struct elog_header {
+ u32 magic;
+ u8 version;
+ u8 header_size;
+ u8 reserved[2];
+} __packed;
+
+/* ELOG related constants */
+#define ELOG_SIGNATURE 0x474f4c45 /* 'ELOG' */
+#define ELOG_VERSION 1
+
+/* SMBIOS event log header */
+struct event_header {
+ u8 type;
+ u8 length;
+ u8 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+} __packed;
+
+extern void elog_prepare_event(struct event_header *event, u8 event_type,
+ void *data, u8 data_size);
+
+#endif /* ELOG_H_ */
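For reference, the event layout built by elog_prepare_event() above (8-byte
header, payload, one trailing checksum byte) can be exercised in isolation.
A minimal sketch (not part of the patch; the timestamp is left zeroed for
brevity) showing that a finished event sums to zero mod 256:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct event_header from elog.h (8 bytes, packed). */
struct event_header {
	uint8_t type, length;
	uint8_t year, month, day, hour, minute, second;
} __attribute__((packed));

int main(void)
{
	uint32_t reason = 2;	/* ELOG_SHUTDOWN_PANIC */
	uint8_t event[sizeof(struct event_header) + sizeof(reason) + 1] = { 0 };
	struct event_header *hdr = (struct event_header *)(void *)event;
	uint8_t sum = 0;
	size_t i;

	hdr->type = 0x81;	/* ELOG_TYPE_OS_EVENT */
	hdr->length = sizeof(event);
	memcpy(event + sizeof(*hdr), &reason, sizeof(reason));

	/* checksum byte = negated sum of all preceding bytes */
	for (i = 0; i < sizeof(event) - 1; i++)
		sum += event[i];
	event[sizeof(event) - 1] = (uint8_t)-sum;

	/* verification: the complete event must sum to 0 (mod 256) */
	for (sum = 0, i = 0; i < sizeof(event); i++)
		sum += event[i];
	printf("event byte sum: 0x%02x (expect 0x00)\n", sum);
	return 0;
}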
diff --git a/include/linux/chromeos_platform.h b/include/linux/chromeos_platform.h
new file mode 100644
index 0000000..32c4510
--- /dev/null
+++ b/include/linux/chromeos_platform.h
@@ -0,0 +1,27 @@
+#ifndef _LINUX_CHROMEOS_PLATFORM_H
+#define _LINUX_CHROMEOS_PLATFORM_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_CHROMEOS
+/*
+ * ChromeOS platform support code. Glue layer between higher level functions
+ * and per-platform firmware interfaces.
+ */
+
+/*
+ * Set the taint bit telling firmware that the currently running side needs
+ * recovery (or reinstall).
+ */
+extern int chromeos_set_need_recovery(void);
+
+#else
+
+static inline int chromeos_set_need_recovery(void)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_CHROMEOS */
+
+#endif /* _LINUX_CHROMEOS_PLATFORM_H */
--
2.7.4
* [RFC 04/11] drm/bridge: Parade PS8640 MIPI DSI -> eDP converter driver
[not found] ` <1506690553-27357-1-git-send-email-ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
` (2 preceding siblings ...)
2017-09-29 13:09 ` [RFC 03/11] platform/chrome: ChromeOS firmware interface driver Ulrich Hecht
@ 2017-09-29 13:09 ` Ulrich Hecht
2017-09-29 13:09 ` [RFC 06/11] arm64: dts: mediatek: Add Elm Rev. 3 device tree Ulrich Hecht
` (2 subsequent siblings)
6 siblings, 0 replies; 17+ messages in thread
From: Ulrich Hecht @ 2017-09-29 13:09 UTC (permalink / raw)
To: magnus.damm-Re5JQEeQqe8AvxtiuMwx3w,
laurent.pinchart-ryLnwIuWjnjg/C1BVhZhaw,
jacopo-AW8dsiIh9cEdnm+yROfE0A
Cc: Ulrich Hecht, linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r
Ported from chromeos-3.18 kernel.
Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
drivers/gpu/drm/bridge/Kconfig | 12 +
drivers/gpu/drm/bridge/Makefile | 3 +
drivers/gpu/drm/bridge/parade-ps8640.c | 1104 ++++++++++++++++++++++++++++++++
3 files changed, 1119 insertions(+)
create mode 100644 drivers/gpu/drm/bridge/parade-ps8640.c
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 966f4eb..a1c0d95 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -26,6 +26,18 @@ config DRM_GENERIC_GPIO_MUX
is active, reports it as a GPIO, and the driver redirects calls to the
appropriate downstream bridge (if any).
+config DRM_PARADE_PS8640
+ tristate "Parade PS8640 MIPI DSI to eDP Converter"
+ depends on DRM
+ depends on OF
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ ---help---
+ Choose this option if you have a PS8640 in your display path.
+ The PS8640 is a high-performance, low-power
+ MIPI DSI to eDP converter.
+
config DRM_ANALOGIX_ANX78XX
tristate "Analogix ANX78XX bridge"
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 2d5652e..6be8a9d 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,3 +1,6 @@
+ccflags-y := -Iinclude/drm
+
+obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_GENERIC_GPIO_MUX) += generic-gpio-mux.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
new file mode 100644
index 0000000..8489f6c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+#include <drm/drm_panel.h>
+
+#include <drmP.h>
+#include <drm_atomic_helper.h>
+#include <drm_crtc_helper.h>
+#include <drm_crtc.h>
+#include <drm_edid.h>
+#include <drm_mipi_dsi.h>
+
+#define PAGE1_VSTART 0x6b
+#define PAGE2_SPI_CFG3 0x82
+#define I2C_TO_SPI_RESET 0x20
+#define PAGE2_ROMADD_BYTE1 0x8e
+#define PAGE2_ROMADD_BYTE2 0x8f
+#define PAGE2_SWSPI_WDATA 0x90
+#define PAGE2_SWSPI_RDATA 0x91
+#define PAGE2_SWSPI_LEN 0x92
+#define PAGE2_SWSPI_CTL 0x93
+#define TRIGGER_NO_READBACK 0x05
+#define TRIGGER_READBACK 0x01
+#define PAGE2_SPI_STATUS 0x9e
+#define SPI_READY 0x0c
+#define PAGE2_GPIO_L 0xa6
+#define PAGE2_GPIO_H 0xa7
+#define PS_GPIO9 BIT(1)
+#define PAGE2_IROM_CTRL 0xb0
+#define IROM_ENABLE 0xc0
+#define IROM_DISABLE 0x80
+#define PAGE2_SW_RESET 0xbc
+#define SPI_SW_RESET BIT(7)
+#define MPU_SW_RESET BIT(6)
+#define PAGE2_ENCTLSPI_WR 0xda
+#define PAGE2_I2C_BYPASS 0xea
+#define I2C_BYPASS_EN 0xd0
+#define PAGE2_MCS_EN 0xf3
+#define MCS_EN BIT(0)
+#define PAGE3_SET_ADD 0xfe
+#define PAGE3_SET_VAL 0xff
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+#define PAGE4_REV_L 0xf0
+#define PAGE4_REV_H 0xf1
+#define PAGE4_CHIP_L 0xf2
+#define PAGE4_CHIP_H 0xf3
+
+/* Firmware */
+#define SPI_MAX_RETRY_CNT 8
+#define PS_FW_NAME "ps864x_fw.bin"
+
+#define FW_CHIP_ID_OFFSET 0
+#define FW_VERSION_OFFSET 2
+#define EDID_I2C_ADDR 0x50
+
+#define WRITE_STATUS_REG_CMD 0x01
+#define READ_STATUS_REG_CMD 0x05
+#define BUSY BIT(0)
+#define CLEAR_ALL_PROTECT 0x00
+#define BLK_PROTECT_BITS 0x0c
+#define STATUS_REG_PROTECT BIT(7)
+#define WRITE_ENABLE_CMD 0x06
+#define CHIP_ERASE_CMD 0xc7
+#define MAX_DEVS 0x8
+struct ps8640_info {
+ u8 family_id;
+ u8 variant_id;
+ u16 version;
+};
+
+struct ps8640 {
+ struct drm_connector connector;
+ struct drm_bridge bridge;
+ struct edid *edid;
+ struct mipi_dsi_device dsi;
+ struct i2c_client *page[MAX_DEVS];
+ struct i2c_client *ddc_i2c;
+ struct regulator *v33;
+ struct regulator *v12;
+ struct drm_panel *panel;
+ struct gpio_desc *gpio_rst_n;
+ struct gpio_desc *gpio_slp_n;
+ struct gpio_desc *gpio_mode_sel_n;
+ bool enabled;
+
+ /* firmware file info */
+ struct ps8640_info info;
+ bool in_fw_update;
+ /* for firmware update protect */
+ struct mutex fw_mutex;
+};
+
+static const u8 enc_ctrl_code[6] = { 0xaa, 0x55, 0x50, 0x41, 0x52, 0x44 };
+static const u8 hw_chip_id[4] = { 0x00, 0x0a, 0x00, 0x30 };
+
+static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
+{
+ return container_of(e, struct ps8640, bridge);
+}
+
+static inline struct ps8640 *connector_to_ps8640(struct drm_connector *e)
+{
+ return container_of(e, struct ps8640, connector);
+}
+
+static int ps8640_read(struct i2c_client *client, u8 reg, u8 *data,
+ u16 data_len)
+{
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = data_len,
+ .buf = data,
+ }
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+
+ if (ret == 2)
+ return 0;
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int ps8640_write_bytes(struct i2c_client *client, const u8 *data,
+ u16 data_len)
+{
+ int ret;
+ struct i2c_msg msg;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = data_len;
+ msg.buf = (u8 *)data;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1)
+ return 0;
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int ps8640_write_byte(struct i2c_client *client, u8 reg, u8 data)
+{
+ u8 buf[] = { reg, data };
+
+ return ps8640_write_bytes(client, buf, sizeof(buf));
+}
+
+static void ps8640_get_mcu_fw_version(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[5];
+ u8 fw_ver[2];
+
+ ps8640_read(client, 0x4, fw_ver, sizeof(fw_ver));
+ ps_bridge->info.version = (fw_ver[0] << 8) | fw_ver[1];
+
+ DRM_INFO_ONCE("ps8640 rom fw version %d.%d\n", fw_ver[0], fw_ver[1]);
+}
+
+static int ps8640_bridge_unmute(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[3];
+ u8 vdo_ctrl_buf[3] = { PAGE3_SET_ADD, VDO_CTL_ADD, VDO_EN };
+
+ return ps8640_write_bytes(client, vdo_ctrl_buf, sizeof(vdo_ctrl_buf));
+}
+
+static int ps8640_bridge_mute(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[3];
+ u8 vdo_ctrl_buf[3] = { PAGE3_SET_ADD, VDO_CTL_ADD, VDO_DIS };
+
+ return ps8640_write_bytes(client, vdo_ctrl_buf, sizeof(vdo_ctrl_buf));
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct i2c_client *client = ps_bridge->page[2];
+ struct i2c_client *page1 = ps_bridge->page[1];
+ int err;
+ u8 set_vdo_done, mcs_en, vstart;
+ ktime_t timeout;
+
+ if (ps_bridge->in_fw_update)
+ return;
+
+ if (ps_bridge->enabled)
+ return;
+
+ err = drm_panel_prepare(ps_bridge->panel);
+ if (err < 0) {
+ DRM_ERROR("failed to prepare panel: %d\n", err);
+ return;
+ }
+
+ err = regulator_enable(ps_bridge->v12);
+ if (err) {
+ DRM_ERROR("failed to enable power 1.2v %d", err);
+ goto err_panel_unprepare;
+ }
+
+ usleep_range(5000, 5500);
+
+ err = regulator_enable(ps_bridge->v33);
+ if (err) {
+ DRM_ERROR("failed to enable power 3.3v %d", err);
+ goto err_pwr_v12_disable;
+ }
+
+ gpiod_set_value(ps_bridge->gpio_slp_n, 1);
+ usleep_range(2000, 2500);
+ gpiod_set_value(ps_bridge->gpio_rst_n, 1);
+
+ /*
+ * Wait for the ps8640 embed mcu ready
+ * First wait 200ms and then check the mcu ready flag every 20ms
+ */
+ msleep(200);
+
+ timeout = ktime_add_ms(ktime_get(), 200);
+ for (;;) {
+ err = ps8640_read(client, PAGE2_GPIO_H, &set_vdo_done, 1);
+ if (err < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", err);
+ goto err_pwr_v33_disable;
+ }
+ if ((set_vdo_done & PS_GPIO9) == PS_GPIO9)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ break;
+ msleep(20);
+ }
+
+ msleep(50);
+
+ ps8640_read(page1, PAGE1_VSTART, &vstart, 1);
+ DRM_INFO("PS8640 PAGE1.0x6B = 0x%x\n", vstart);
+
+ /**
+ * The Manufacturer Command Set (MCS) is a device dependent interface
+ * intended for factory programming of the display module default
+ * parameters. Once the display module is configured, the MCS shall be
+ * disabled by the manufacturer. Once disabled, all MCS commands are
+ * ignored by the display interface.
+ */
+ ps8640_read(client, PAGE2_MCS_EN, &mcs_en, 1);
+ ps8640_write_byte(client, PAGE2_MCS_EN, mcs_en & ~MCS_EN);
+
+ if (ps_bridge->info.version == 0)
+ ps8640_get_mcu_fw_version(ps_bridge);
+
+ err = ps8640_bridge_unmute(ps_bridge);
+ if (err)
+ DRM_ERROR("failed to enable unmutevideo: %d\n", err);
+ /* Switch access edp panel's edid through i2c */
+ ps8640_write_byte(client, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
+ ps_bridge->enabled = true;
+
+ return;
+
+err_pwr_v33_disable:
+ gpiod_set_value(ps_bridge->gpio_rst_n, 0);
+ gpiod_set_value(ps_bridge->gpio_slp_n, 0);
+ regulator_disable(ps_bridge->v33);
+err_pwr_v12_disable:
+ regulator_disable(ps_bridge->v12);
+err_panel_unprepare:
+ drm_panel_unprepare(ps_bridge->panel);
+}
+
+static void ps8640_enable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int err;
+
+ err = drm_panel_enable(ps_bridge->panel);
+ if (err < 0)
+ DRM_ERROR("failed to enable panel: %d\n", err);
+}
+
+static void ps8640_disable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int err;
+
+ err = drm_panel_disable(ps_bridge->panel);
+ if (err < 0)
+ DRM_ERROR("failed to disable panel: %d\n", err);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int err;
+
+ if (ps_bridge->in_fw_update)
+ return;
+
+ if (!ps_bridge->enabled)
+ return;
+
+ ps_bridge->enabled = false;
+
+ err = ps8640_bridge_mute(ps_bridge);
+ if (err < 0)
+ DRM_ERROR("failed to unmutevideo: %d\n", err);
+
+ gpiod_set_value(ps_bridge->gpio_rst_n, 0);
+ gpiod_set_value(ps_bridge->gpio_slp_n, 0);
+
+ err = regulator_disable(ps_bridge->v33);
+ if (err)
+ DRM_ERROR("failed to disable power 3.3v: %d\n", err);
+
+ err = regulator_disable(ps_bridge->v12);
+ if (err)
+ DRM_ERROR("failed to disable power 1.2v: %d\n", err);
+
+ err = drm_panel_unprepare(ps_bridge->panel);
+ if (err)
+ DRM_ERROR("failed to unprepare panel: %d\n", err);
+}
+
+static int ps8640_get_modes(struct drm_connector *connector)
+{
+ struct ps8640 *ps_bridge = connector_to_ps8640(connector);
+ struct edid *edid;
+ int num_modes = 0;
+ bool power_off;
+
+ if (ps_bridge->edid)
+ return drm_add_edid_modes(connector, ps_bridge->edid);
+
+ power_off = !ps_bridge->enabled;
+ ps8640_pre_enable(&ps_bridge->bridge);
+
+ edid = drm_get_edid(connector, ps_bridge->ddc_i2c->adapter);
+ if (!edid)
+ goto out;
+
+ ps_bridge->edid = edid;
+ drm_mode_connector_update_edid_property(connector, ps_bridge->edid);
+ num_modes = drm_add_edid_modes(connector, ps_bridge->edid);
+
+out:
+ if (power_off)
+ ps8640_post_disable(&ps_bridge->bridge);
+
+ return num_modes;
+}
+
+static struct drm_encoder *ps8640_best_encoder(struct drm_connector *connector)
+{
+ struct ps8640 *ps_bridge = connector_to_ps8640(connector);
+
+ return ps_bridge->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ps8640_connector_helper_funcs = {
+ .get_modes = ps8640_get_modes,
+ .best_encoder = ps8640_best_encoder,
+};
+
+static enum drm_connector_status ps8640_detect(struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_funcs ps8640_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ps8640_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ps8640_bridge_attach(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct device *dev = &ps_bridge->page[0]->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *port, *in_ep;
+ struct device_node *dsi_node = NULL;
+ struct mipi_dsi_host *host = ps_bridge->dsi.host;
+ int ret;
+
+ ret = drm_connector_init(bridge->dev, &ps_bridge->connector,
+ &ps8640_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(&ps_bridge->connector,
+ &ps8640_connector_helper_funcs);
+
+ ps_bridge->connector.dpms = DRM_MODE_DPMS_ON;
+ drm_mode_connector_attach_encoder(&ps_bridge->connector,
+ bridge->encoder);
+
+ if (ps_bridge->panel)
+ drm_panel_attach(ps_bridge->panel, &ps_bridge->connector);
+
+ /* port@0 is ps8640 dsi input port */
+ port = of_graph_get_port_by_id(np, 0);
+ if (port) {
+ in_ep = of_get_child_by_name(port, "endpoint");
+ of_node_put(port);
+ if (in_ep) {
+ dsi_node = of_graph_get_remote_port_parent(in_ep);
+ of_node_put(in_ep);
+ }
+ }
+ if (dsi_node) {
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ of_node_put(dsi_node);
+ if (!host) {
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
+ ps_bridge->dsi.host = host;
+ ps_bridge->dsi.mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ ps_bridge->dsi.format = MIPI_DSI_FMT_RGB888;
+ ps_bridge->dsi.lanes = 4;
+ ret = mipi_dsi_attach(&ps_bridge->dsi);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ if (ps_bridge->panel)
+ drm_panel_detach(ps_bridge->panel);
+ drm_connector_cleanup(&ps_bridge->connector);
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8640_bridge_funcs = {
+ .attach = ps8640_bridge_attach,
+ .disable = ps8640_disable,
+ .post_disable = ps8640_post_disable,
+ .pre_enable = ps8640_pre_enable,
+ .enable = ps8640_enable,
+};
+
+/* Firmware Version is returned as Major.Minor */
+static ssize_t ps8640_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ps8640 *ps_bridge = dev_get_drvdata(dev);
+ struct ps8640_info *info = &ps_bridge->info;
+
+ return scnprintf(buf, PAGE_SIZE, "%u.%u\n", info->version >> 8,
+ info->version & 0xff);
+}
+
+/* Hardware Version is returned as FamilyID.VariantID */
+static ssize_t ps8640_hw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ps8640 *ps_bridge = dev_get_drvdata(dev);
+ struct ps8640_info *info = &ps_bridge->info;
+
+ return scnprintf(buf, PAGE_SIZE, "ps%u.%u\n", info->family_id,
+ info->variant_id);
+}
+
+static int ps8640_spi_send_cmd(struct ps8640 *ps_bridge, u8 *cmd, u8 cmd_len)
+{
+ struct i2c_client *client = ps_bridge->page[2];
+ u8 i, buf[3] = { PAGE2_SWSPI_LEN, cmd_len - 1, TRIGGER_NO_READBACK };
+ int ret;
+
+ ret = ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_ENABLE);
+ if (ret)
+ goto err;
+
+ /* write command in write port */
+ for (i = 0; i < cmd_len; i++) {
+ ret = ps8640_write_byte(client, PAGE2_SWSPI_WDATA, cmd[i]);
+ if (ret)
+ goto err_irom_disable;
+ }
+
+ ret = ps8640_write_bytes(client, buf, sizeof(buf));
+ if (ret)
+ goto err_irom_disable;
+
+ ret = ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_DISABLE);
+ if (ret)
+ goto err;
+
+ return 0;
+err_irom_disable:
+ ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_DISABLE);
+err:
+ dev_err(&client->dev, "send command err: %d\n", ret);
+ return ret;
+}
+
+static int ps8640_wait_spi_ready(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[2];
+ u8 spi_rdy_st;
+ ktime_t timeout;
+
+ timeout = ktime_add_ms(ktime_get(), 200);
+ for (;;) {
+ ps8640_read(client, PAGE2_SPI_STATUS, &spi_rdy_st, 1);
+ if ((spi_rdy_st & SPI_READY) != SPI_READY)
+ break;
+
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ dev_err(&client->dev, "wait spi ready timeout\n");
+ return -EBUSY;
+ }
+
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static int ps8640_wait_spi_nobusy(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[2];
+ u8 spi_status, buf[3] = { PAGE2_SWSPI_LEN, 0, TRIGGER_READBACK };
+ int ret;
+ ktime_t timeout;
+
+ timeout = ktime_add_ms(ktime_get(), 500);
+ for (;;) {
+ /* 0x05 RDSR; Read-Status-Register */
+ ret = ps8640_write_byte(client, PAGE2_SWSPI_WDATA,
+ READ_STATUS_REG_CMD);
+ if (ret)
+ goto err_send_cmd_exit;
+
+ ret = ps8640_write_bytes(client, buf, 3);
+ if (ret)
+ goto err_send_cmd_exit;
+
+ /* delay for cmd send */
+ usleep_range(300, 500);
+ /* wait for SPI ROM until not busy */
+ ret = ps8640_read(client, PAGE2_SWSPI_RDATA, &spi_status, 1);
+ if (ret)
+ goto err_send_cmd_exit;
+
+ if (!(spi_status & BUSY))
+ break;
+
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ dev_err(&client->dev, "wait spi no busy timeout: %d\n",
+ ret);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+
+err_send_cmd_exit:
+ dev_err(&client->dev, "send command err: %d\n", ret);
+ return ret;
+}
+
+static int ps8640_wait_rom_idle(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[0];
+ int ret;
+
+ ret = ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_ENABLE);
+ if (ret)
+ goto exit;
+
+ ret = ps8640_wait_spi_ready(ps_bridge);
+ if (ret)
+ goto err_spi;
+
+ ret = ps8640_wait_spi_nobusy(ps_bridge);
+ if (ret)
+ goto err_spi;
+
+ ret = ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_DISABLE);
+ if (ret)
+ goto exit;
+
+ return 0;
+
+err_spi:
+ ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_DISABLE);
+exit:
+ if (ret)
+ dev_err(&client->dev, "wait ps8640 rom idle fail: %d\n", ret);
+
+ return ret;
+}
+
+static int ps8640_spi_dl_mode(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[2];
+ int ret;
+
+ /* switch ps8640 mode to spi dl mode */
+ if (ps_bridge->gpio_mode_sel_n)
+ gpiod_set_value(ps_bridge->gpio_mode_sel_n, 0);
+
+ /* reset spi interface */
+ ret = ps8640_write_byte(client, PAGE2_SW_RESET,
+ SPI_SW_RESET | MPU_SW_RESET);
+ if (ret)
+ goto exit;
+
+ ret = ps8640_write_byte(client, PAGE2_SW_RESET, MPU_SW_RESET);
+ if (ret)
+ goto exit;
+
+exit:
+ if (ret)
+ dev_err(&client->dev, "fail reset spi interface: %d\n", ret);
+
+ return ret;
+}
+
+static int ps8640_rom_prepare(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[2];
+ struct device *dev = &client->dev;
+ u8 i, cmd[2];
+ int ret;
+
+ cmd[0] = WRITE_ENABLE_CMD;
+ ret = ps8640_spi_send_cmd(ps_bridge, cmd, 1);
+ if (ret) {
+ dev_err(dev, "failed enable-write-status-register: %d\n", ret);
+ return ret;
+ }
+
+ cmd[0] = WRITE_STATUS_REG_CMD;
+ cmd[1] = CLEAR_ALL_PROTECT;
+ ret = ps8640_spi_send_cmd(ps_bridge, cmd, 2);
+ if (ret) {
+ dev_err(dev, "fail disable all protection: %d\n", ret);
+ return ret;
+ }
+
+ /* wait for SPI module ready */
+ ret = ps8640_wait_rom_idle(ps_bridge);
+ if (ret) {
+ dev_err(dev, "fail wait rom idle: %d\n", ret);
+ return ret;
+ }
+
+ ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_ENABLE);
+ for (i = 0; i < 6; i++)
+ ps8640_write_byte(client, PAGE2_ENCTLSPI_WR, enc_ctrl_code[i]);
+ ps8640_write_byte(client, PAGE2_IROM_CTRL, IROM_DISABLE);
+
+ /* Enable-Write-Status-Register */
+ cmd[0] = WRITE_ENABLE_CMD;
+ ret = ps8640_spi_send_cmd(ps_bridge, cmd, 1);
+ if (ret) {
+ dev_err(dev, "fail enable-write-status-register: %d\n", ret);
+ return ret;
+ }
+
+ /* chip erase command */
+ cmd[0] = CHIP_ERASE_CMD;
+ ret = ps8640_spi_send_cmd(ps_bridge, cmd, 1);
+ if (ret) {
+ dev_err(dev, "fail disable all protection: %d\n", ret);
+ return ret;
+ }
+
+ ret = ps8640_wait_rom_idle(ps_bridge);
+ if (ret) {
+ dev_err(dev, "fail wait rom idle: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ps8640_check_chip_id(struct ps8640 *ps_bridge)
+{
+ struct i2c_client *client = ps_bridge->page[4];
+ u8 buf[4];
+
+ ps8640_read(client, PAGE4_REV_L, buf, 4);
+ return memcmp(buf, hw_chip_id, sizeof(buf));
+}
+
+static int ps8640_validate_firmware(struct ps8640 *ps_bridge,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = ps_bridge->page[0];
+ u16 fw_chip_id;
+
+ /*
+ * Get the chip_id from the firmware. Make sure that it is the
+ * right controller to do the firmware and config update.
+ */
+ fw_chip_id = get_unaligned_le16(fw->data + FW_CHIP_ID_OFFSET);
+
+ if (fw_chip_id != 0x8640 && ps8640_check_chip_id(ps_bridge) == 0) {
+ dev_err(&client->dev,
+ "chip id mismatch: fw 0x%x vs. chip 0x8640\n",
+ fw_chip_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ps8640_write_rom(struct ps8640 *ps_bridge, const struct firmware *fw)
+{
+ struct i2c_client *client = ps_bridge->page[0];
+ struct device *dev = &client->dev;
+ struct i2c_client *client2 = ps_bridge->page[2];
+ struct i2c_client *client7 = ps_bridge->page[7];
+ size_t pos;
+ u8 buf[257], rom_page_id_buf[3];
+ int ret;
+ u16 cpy_len;
+
+ ps8640_write_byte(client2, PAGE2_SPI_CFG3, I2C_TO_SPI_RESET);
+ msleep(100);
+ ps8640_write_byte(client2, PAGE2_SPI_CFG3, 0x00);
+
+ for (pos = 0; pos < fw->size; pos += cpy_len) {
+ rom_page_id_buf[0] = PAGE2_ROMADD_BYTE1;
+ rom_page_id_buf[1] = pos >> 8;
+ rom_page_id_buf[2] = pos >> 16;
+ ret = ps8640_write_bytes(client2, rom_page_id_buf, 3);
+ if (ret)
+ goto error;
+ cpy_len = fw->size >= 256 + pos ? 256 : fw->size - pos;
+ buf[0] = 0;
+ memcpy(buf + 1, fw->data + pos, cpy_len);
+ ret = ps8640_write_bytes(client7, buf, cpy_len + 1);
+ if (ret)
+ goto error;
+
+ dev_dbg(dev, "fw update completed %zu / %zu bytes\n", pos,
+ fw->size);
+ }
+ return 0;
+
+error:
+ dev_err(dev, "failed write external flash, %d\n", ret);
+ return ret;
+}
+
+static int ps8640_spi_normal_mode(struct ps8640 *ps_bridge)
+{
+ u8 cmd[2];
+ struct i2c_client *client = ps_bridge->page[2];
+
+ /* Enable-Write-Status-Register */
+ cmd[0] = WRITE_ENABLE_CMD;
+ ps8640_spi_send_cmd(ps_bridge, cmd, 1);
+
+ /* protect BPL/BP0/BP1 */
+ cmd[0] = WRITE_STATUS_REG_CMD;
+ cmd[1] = BLK_PROTECT_BITS | STATUS_REG_PROTECT;
+ ps8640_spi_send_cmd(ps_bridge, cmd, 2);
+
+ /* wait for SPI rom ready */
+ ps8640_wait_rom_idle(ps_bridge);
+
+ /* disable PS8640 mapping function */
+ ps8640_write_byte(client, PAGE2_ENCTLSPI_WR, 0x00);
+
+ if (ps_bridge->gpio_mode_sel_n)
+ gpiod_set_value(ps_bridge->gpio_mode_sel_n, 1);
+ return 0;
+}
+
+static int ps8640_enter_bl(struct ps8640 *ps_bridge)
+{
+ ps_bridge->in_fw_update = true;
+ return ps8640_spi_dl_mode(ps_bridge);
+}
+
+static void ps8640_exit_bl(struct ps8640 *ps_bridge, const struct firmware *fw)
+{
+ ps8640_spi_normal_mode(ps_bridge);
+ ps_bridge->in_fw_update = false;
+}
+
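+/*
+ * Firmware update sequence: enter the SPI download mode (ps8640_enter_bl),
+ * clear the flash protection bits and erase the chip (ps8640_rom_prepare),
+ * stream the image in 256-byte pages (ps8640_write_rom), then restore the
+ * protection bits and normal operation (ps8640_exit_bl).
+ */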
+static int ps8640_load_fw(struct ps8640 *ps_bridge, const struct firmware *fw)
+{
+ struct i2c_client *client = ps_bridge->page[0];
+ struct device *dev = &client->dev;
+ int ret;
+ bool ps8640_status_backup = ps_bridge->enabled;
+
+ ret = ps8640_validate_firmware(ps_bridge, fw);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ps_bridge->fw_mutex);
+ if (!ps_bridge->in_fw_update) {
+ if (!ps8640_status_backup)
+ ps8640_pre_enable(&ps_bridge->bridge);
+
+ ret = ps8640_enter_bl(ps_bridge);
+ if (ret)
+ goto exit;
+ }
+
+ ret = ps8640_rom_prepare(ps_bridge);
+ if (ret)
+ goto exit;
+
+ ret = ps8640_write_rom(ps_bridge, fw);
+
+exit:
+ if (ret)
+ dev_err(dev, "Failed to load firmware, %d\n", ret);
+
+ ps8640_exit_bl(ps_bridge, fw);
+ if (!ps8640_status_backup)
+ ps8640_post_disable(&ps_bridge->bridge);
+ mutex_unlock(&ps_bridge->fw_mutex);
+ return ret;
+}
+
+static ssize_t ps8640_update_fw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, PS_FW_NAME, dev);
+ if (error) {
+ dev_err(dev, "Unable to open firmware %s: %d\n",
+ PS_FW_NAME, error);
+ return error;
+ }
+
+ error = ps8640_load_fw(ps_bridge, fw);
+ if (error)
+ dev_err(dev, "The firmware update failed(%d)\n", error);
+ else
+ dev_info(dev, "The firmware update succeeded\n");
+
+ release_firmware(fw);
+ return error ? error : count;
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, ps8640_fw_version_show, NULL);
+static DEVICE_ATTR(hw_version, S_IRUGO, ps8640_hw_version_show, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, ps8640_update_fw_store);
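+
+/*
+ * Userspace triggers an update by writing anything to the update_fw
+ * attribute (the value itself is ignored), e.g. via a path like
+ * /sys/bus/i2c/devices/<client>/update_fw; the handler always loads
+ * PS_FW_NAME through request_firmware().
+ */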
+
+static struct attribute *ps8640_attrs[] = {
+ &dev_attr_fw_version.attr,
+ &dev_attr_hw_version.attr,
+ &dev_attr_update_fw.attr,
+ NULL
+};
+
+static const struct attribute_group ps8640_attr_group = {
+ .attrs = ps8640_attrs,
+};
+
+static void ps8640_remove_sysfs_group(void *data)
+{
+ struct ps8640 *ps_bridge = data;
+
+ sysfs_remove_group(&ps_bridge->page[0]->dev.kobj, &ps8640_attr_group);
+}
+
+static int ps8640_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ps8640 *ps_bridge;
+ struct device_node *np = dev->of_node;
+ struct device_node *port, *out_ep;
+ struct device_node *panel_node = NULL;
+ int ret;
+ u32 i;
+
+ ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
+ if (!ps_bridge)
+ return -ENOMEM;
+
+ /* port@1 is ps8640 output port */
+ port = of_graph_get_port_by_id(np, 1);
+ if (port) {
+ out_ep = of_get_child_by_name(port, "endpoint");
+ of_node_put(port);
+ if (out_ep) {
+ panel_node = of_graph_get_remote_port_parent(out_ep);
+ of_node_put(out_ep);
+ }
+ }
+ if (panel_node) {
+ ps_bridge->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!ps_bridge->panel)
+ return -EPROBE_DEFER;
+ }
+
+ mutex_init(&ps_bridge->fw_mutex);
+ ps_bridge->v12 = devm_regulator_get(dev, "vdd12");
+ if (IS_ERR(ps_bridge->v12))
+ return PTR_ERR(ps_bridge->v12);
+
+ ps_bridge->v33 = devm_regulator_get(dev, "vdd33");
+ if (IS_ERR(ps_bridge->v33))
+ return PTR_ERR(ps_bridge->v33);
+
+ ps_bridge->gpio_mode_sel_n = devm_gpiod_get_optional(&client->dev,
+ "mode-sel",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_mode_sel_n)) {
+ ret = PTR_ERR(ps_bridge->gpio_mode_sel_n);
+ dev_err(dev, "cannot get mode-sel %d\n", ret);
+ return ret;
+ }
+
+ ps_bridge->gpio_slp_n = devm_gpiod_get(&client->dev, "sleep",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ps_bridge->gpio_slp_n)) {
+ ret = PTR_ERR(ps_bridge->gpio_slp_n);
+ dev_err(dev, "cannot get sleep: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Request the reset pin low to avoid the bridge being
+ * initialized prematurely
+ */
+ ps_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ps_bridge->gpio_rst_n)) {
+ ret = PTR_ERR(ps_bridge->gpio_rst_n);
+ dev_err(dev, "cannot get reset: %d\n", ret);
+ return ret;
+ }
+
+ ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
+ ps_bridge->bridge.of_node = dev->of_node;
+
+ ps_bridge->page[0] = client;
+ ps_bridge->ddc_i2c = i2c_new_dummy(client->adapter, EDID_I2C_ADDR);
+ if (!ps_bridge->ddc_i2c) {
+ dev_err(dev, "failed ddc_i2c dummy device, address%02x\n",
+ EDID_I2C_ADDR);
+ return -EBUSY;
+ }
+ /*
+ * ps8640 uses multiple addresses, use dummy devices for them
+ * page[0]: for DP control
+ * page[1]: for VIDEO Bridge
+ * page[2]: for control top
+ * page[3]: for DSI Link Control1
+ * page[4]: for MIPI Phy
+ * page[5]: for VPLL
+ * page[6]: for DSI Link Control2
+ * page[7]: for spi rom mapping
+ */
+ for (i = 1; i < MAX_DEVS; i++) {
+ ps_bridge->page[i] = i2c_new_dummy(client->adapter,
+ client->addr + i);
+ if (!ps_bridge->page[i]) {
+ dev_err(dev, "failed i2c dummy device, address%02x\n",
+ client->addr + i);
+ ret = -EBUSY;
+ goto exit_dummy;
+ }
+ }
+ i2c_set_clientdata(client, ps_bridge);
+
+ ret = sysfs_create_group(&client->dev.kobj, &ps8640_attr_group);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs entries: %d\n", ret);
+ goto exit_dummy;
+ }
+
+ ret = devm_add_action(dev, ps8640_remove_sysfs_group, ps_bridge);
+ if (ret) {
+ dev_err(dev, "failed to add sysfs cleanup action: %d\n", ret);
+ goto exit_remove_sysfs;
+ }
+
+ ret = drm_bridge_add(&ps_bridge->bridge);
+ if (ret) {
+ dev_err(dev, "Failed to add bridge: %d\n", ret);
+ goto exit_remove_sysfs;
+ }
+ return 0;
+
+exit_remove_sysfs:
+ sysfs_remove_group(&ps_bridge->page[0]->dev.kobj, &ps8640_attr_group);
+exit_dummy:
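+ /* Unwind: drop the dummy clients registered so far (page[1]..page[i - 1]). */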
+ while (--i)
+ i2c_unregister_device(ps_bridge->page[i]);
+ i2c_unregister_device(ps_bridge->ddc_i2c);
+ return ret;
+}
+
+static int ps8640_remove(struct i2c_client *client)
+{
+ struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+ int i = MAX_DEVS;
+
+ drm_bridge_remove(&ps_bridge->bridge);
+ sysfs_remove_group(&ps_bridge->page[0]->dev.kobj, &ps8640_attr_group);
+ while (--i)
+ i2c_unregister_device(ps_bridge->page[i]);
+
+ i2c_unregister_device(ps_bridge->ddc_i2c);
+ return 0;
+}
+
+static const struct i2c_device_id ps8640_i2c_table[] = {
+ { "parade,ps8640", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ps8640_i2c_table);
+
+static const struct of_device_id ps8640_match[] = {
+ { .compatible = "parade,ps8640" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ps8640_match);
+
+static struct i2c_driver ps8640_driver = {
+ .id_table = ps8640_i2c_table,
+ .probe = ps8640_probe,
+ .remove = ps8640_remove,
+ .driver = {
+ .name = "parade,ps8640",
+ .of_match_table = ps8640_match,
+ },
+};
+module_i2c_driver(ps8640_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>");
+MODULE_AUTHOR("CK Hu <ck.hu-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>");
+MODULE_DESCRIPTION("PARADE ps8640 DSI-eDP converter driver");
+MODULE_LICENSE("GPL v2");
--
2.7.4
* [RFC 06/11] arm64: dts: mediatek: Add Elm Rev. 3 device tree
[not found] ` <1506690553-27357-1-git-send-email-ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
` (3 preceding siblings ...)
2017-09-29 13:09 ` [RFC 04/11] drm/bridge: Parade PS8640 MIPI DSI -> eDP converter driver Ulrich Hecht
@ 2017-09-29 13:09 ` Ulrich Hecht
[not found] ` <1506690553-27357-7-git-send-email-ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2017-09-29 13:09 ` [RFC 08/11] hack: drm/mediatek: roll back drm_of_find_panel_or_bridge() use Ulrich Hecht
2017-09-29 13:09 ` [RFC 10/11] hack: mtk-sd: use old driver from vendor kernel Ulrich Hecht
6 siblings, 1 reply; 17+ messages in thread
From: Ulrich Hecht @ 2017-09-29 13:09 UTC (permalink / raw)
To: magnus.damm-Re5JQEeQqe8AvxtiuMwx3w,
laurent.pinchart-ryLnwIuWjnjg/C1BVhZhaw,
jacopo-AW8dsiIh9cEdnm+yROfE0A
Cc: Ulrich Hecht, linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r
Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
arch/arm64/boot/dts/mediatek/Makefile | 1 +
arch/arm64/boot/dts/mediatek/mt8173-elm-rev3.dts | 21 +
arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi | 210 +++++
arch/arm64/boot/dts/mediatek/mt8173-oak.dtsi | 1014 ++++++++++++++++++++++
4 files changed, 1246 insertions(+)
create mode 100644 arch/arm64/boot/dts/mediatek/mt8173-elm-rev3.dts
create mode 100644 arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
create mode 100644 arch/arm64/boot/dts/mediatek/mt8173-oak.dtsi
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 151723b..0521e29 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -4,6 +4,7 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-rev3.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-rev3.dts b/arch/arm64/boot/dts/mediatek/mt8173-elm-rev3.dts
new file mode 100644
index 0000000..68d4095
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-rev3.dts
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 MediaTek Inc.
+ * Author: Eddie Huang <eddie.huang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "mt8173-elm.dtsi"
+
+/ {
+ model = "Mediatek Elm rev3 board";
+ compatible = "google,elm-rev3", "google,elm", "mediatek,mt8173";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
new file mode 100644
index 0000000..850037f
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2016 MediaTek Inc.
+ * Author: Eddie Huang <eddie.huang-NuS5LvNUpcJWk0Htik3J/w@public.gmane.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt8173-oak.dtsi"
+
+/ {
+ hdmicon: connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_mux_out_hdmi>;
+ };
+ };
+ };
+
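+ /*
+ * The gpio-display-mux bridge routes the HDMI encoder output to
+ * either the ANX7688 or the HDMI connector, depending on the level
+ * of the detect GPIO.
+ */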
+ hdmi_mux: hdmi_mux {
+ compatible = "gpio-display-mux";
+ status = "okay";
+ detect-gpios = <&pio 36 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_mux_pins>;
+ ddc-i2c-bus = <&hdmiddc0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 { /* input */
+ reg = <0>;
+
+ hdmi_mux_in: endpoint {
+ remote-endpoint = <&hdmi0_out>;
+ };
+ };
+
+ port@1 { /* output */
+ reg = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_mux_out_anx: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&anx7688_in>;
+ };
+
+ hdmi_mux_out_hdmi: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+
+ sound: sound {
+ compatible = "mediatek,mt8173-rt5650";
+ mediatek,audio-codec = <&rt5650 &hdmi0>;
+ mediatek,mclk = <1>;
+ mediatek,platform = <&afe>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&aud_i2s2>;
+ codec-capture {
+ sound-dai = <&rt5650 1>;
+ };
+ };
+};
+
+&aud_i2s2 {
+ pins1 {
+ pinmux = <MT8173_PIN_128_I2S0_LRCK__FUNC_I2S1_WS>,
+ <MT8173_PIN_129_I2S0_BCK__FUNC_I2S1_BCK>,
+ <MT8173_PIN_130_I2S0_MCK__FUNC_I2S1_MCK>,
+ <MT8173_PIN_131_I2S0_DATA0__FUNC_I2S1_DO_1>,
+ <MT8173_PIN_12_EINT12__FUNC_I2S2_WS>,
+ <MT8173_PIN_13_EINT13__FUNC_I2S2_BCK>,
+ <MT8173_PIN_132_I2S0_DATA1__FUNC_I2S2_DI_2>;
+ bias-pull-down;
+ };
+};
+
+&cpu_thermal {
+ sustainable-power = <4500>; /* milliwatts */
+ trips {
+ threshold: trip-point@0 {
+ temperature = <60000>;
+ };
+
+ target: trip-point@1 {
+ temperature = <65000>;
+ };
+ };
+};
+
+&cros_ec {
+ i2c_tunnel_b: i2c-tunnel@1 {
+ compatible = "google,cros-ec-i2c-tunnel";
+ google,remote-bus = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ anx7688: anx7688@2c {
+ compatible = "analogix,anx7688";
+ status = "okay";
+ reg = <0x2c>;
+
+ port {
+ anx7688_in: endpoint {
+ remote-endpoint = <&hdmi_mux_out_anx>;
+ };
+ };
+ };
+ };
+};
+
+&hdmi0 {
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+ hdmi0_out: endpoint {
+ remote-endpoint = <&hdmi_mux_in>;
+ };
+ };
+ };
+};
+
+&hdmi_phy {
+ status = "okay";
+ mediatek,ibias = <0xc>;
+};
+
+&i2c0 {
+ ps8640: edp-bridge@8 {
+ compatible = "parade,ps8640";
+ reg = <0x8>;
+ sleep-gpios = <&pio 127 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 115 GPIO_ACTIVE_HIGH>;
+ mode-sel-gpios = <&pio 92 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ps8640_pins>;
+ vdd12-supply = <&ps8640_fixed_1v2>;
+ vdd33-supply = <&mt6397_vgp2_reg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ps8640_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ps8640_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&gpio_keys {
+ lid {
+ gpios = <&pio 69 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mt6397_vgp6_reg {
+ regulator-always-on;
+};
+
+&pio {
+ hdmi_mux_pins: hdmi_mux_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_36_DAISYNC__FUNC_GPIO36>;
+ };
+ };
+};
+
+&ps8640_fixed_1v2 {
+ regulator-enable-ramp-delay = <2000>;
+};
+
+&rt5650 {
+ #sound-dai-cells = <1>;
+ realtek,dmic1-data-pin = <2>;
+ realtek,jd-mode = <2>;
+};
+
+&trackpad {
+ vcc-supply = <&mt6397_vgp6_reg>;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-oak.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-oak.dtsi
new file mode 100644
index 0000000..33a5b07
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-oak.dtsi
@@ -0,0 +1,1014 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "mt8173.dtsi"
+#include "mt8173-pinfunc.h"
+
+/ {
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x80000000>;
+ };
+
+ backlight_lcd: backlight_lcd {
+ compatible = "pwm-backlight";
+ pwms = <&pwm0 0 1000000>;
+ brightness-levels = <
+ 0 16 32 48 64 80 96 112
+ 128 144 160 176 192 208 224 240
+ 255
+ >;
+ default-brightness-level = <9>;
+ power-supply = <&bl_fixed_reg>;
+ enable-gpios = <&pio 95 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&disp_pwm0_pins>;
+ status = "okay";
+ };
+
+ bl_fixed_reg: fixedregulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "bl_fixed";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ startup-delay-us = <1000>;
+ enable-active-high;
+ gpio = <&pio 32 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bl_fixed_pins>;
+ };
+
+ chosen {
+ linux,stdout-path = &uart0;
+ };
+
+ firmware {
+ chromeos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&chromeos_write_protect>;
+ write-protect-gpio = <&pio 4 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pins>;
+
+ lid {
+ label = "Lid";
+ gpios = <&pio 12 GPIO_ACTIVE_LOW>;
+ linux,code = <0>; /* SW_LID */
+ linux,input-type = <5>; /* EV_SW */
+ gpio-key,wakeup;
+ };
+
+ power {
+ label = "Power";
+ gpios = <&pio 14 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_POWER>;
+ debounce-interval = <30>;
+ gpio-key,wakeup;
+ };
+
+ tablet_mode {
+ label = "Tablet_mode";
+ gpios = <&pio 121 GPIO_ACTIVE_HIGH>;
+ linux,code = <1>; /* SW_TABLET_MODE */
+ linux,input-type = <5>; /* EV_SW */
+ gpio-key,wakeup;
+ };
+
+ volume_down {
+ label = "Volume_down";
+ gpios = <&pio 123 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+ volume_up {
+ label = "Volume_up";
+ gpios = <&pio 124 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ panel: panel {
+ compatible = "lg,lp120up1";
+ power-supply = <&panel_fixed_3v3>;
+ ddc-i2c-bus = <&i2c4>;
+ backlight = <&backlight_lcd>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&ps8640_out>;
+ };
+ };
+ };
+
+ panel_fixed_3v3: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "PANEL_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpio = <&pio 41 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_fixed_pins>;
+ };
+
+ ps8640_fixed_1v2: regulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "PS8640_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ enable-active-high;
+ regulator-boot-on;
+ gpio = <&pio 30 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ps8640_fixed_pins>;
+ };
+
+ sdio_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 85 GPIO_ACTIVE_HIGH>;
+ };
+
+ usb_p1_vbus: regulator@3 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p0_vbus: regulator@4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&cec {
+ status = "okay";
+};
+
+&cpu0 {
+ proc-supply = <&mt6397_vpca15_reg>;
+};
+
+&cpu1 {
+ proc-supply = <&mt6397_vpca15_reg>;
+};
+
+&cpu2 {
+ proc-supply = <&da9211_vcpu_reg>;
+ sram-supply = <&mt6397_vsramca7_reg>;
+};
+
+&cpu3 {
+ proc-supply = <&da9211_vcpu_reg>;
+ sram-supply = <&mt6397_vsramca7_reg>;
+};
+
+&cpu_thermal {
+ sustainable-power = <5000>; /* milliwatts */
+};
+
+&dsi0 {
+ status = "okay";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ dsi0_out: endpoint {
+ remote-endpoint = <&ps8640_in>;
+ };
+ };
+ };
+};
+
+&dpi0 {
+ status = "okay";
+};
+
+&hdmi_phy {
+ status = "okay";
+ mediatek,ibias = <0x3f>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ rt5650: audio-codec@1a {
+ compatible = "realtek,rt5650";
+ reg = <0x1a>;
+ avdd-supply = <&mt6397_vgp1_reg>;
+ cpvdd-supply = <&mt6397_vcama_reg>;
+ interrupt-parent = <&pio>;
+ interrupts = <3 IRQ_TYPE_EDGE_BOTH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rt5650_irq>;
+ realtek,jd-mode = <3>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <1500000>;
+ status = "okay";
+
+ da9211: da9211@68 {
+ compatible = "dlg,da9211";
+ reg = <0x68>;
+
+ regulators {
+ da9211_vcpu_reg: BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1310000>;
+ regulator-min-microamp = <2000000>;
+ regulator-max-microamp = <4400000>;
+ regulator-ramp-delay = <10000>;
+ regulator-always-on;
+/* regulator-supported-modes =
+ <REGULATOR_OPERATION_MODE_FAST
+ REGULATOR_OPERATION_MODE_NORMAL>;*/
+ };
+
+ da9211_vgpu_reg: BUCKB {
+ regulator-name = "VBUCKB";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1310000>;
+ regulator-min-microamp = <2000000>;
+ regulator-max-microamp = <3000000>;
+ regulator-ramp-delay = <10000>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ tpm: tpm@20 {
+ compatible = "infineon,slb9645tt";
+ reg = <0x20>;
+ powered-while-suspended;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ touchscreen: touchscreen@10 {
+ compatible = "elan,ekth3500";
+ reg = <0x10>;
+ interrupt-parent = <&pio>;
+ interrupts = <88 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ trackpad: trackpad@15 {
+ compatible = "elan,ekth3000";
+ interrupt-parent = <&pio>;
+ interrupts = <117 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_irq>;
+ reg = <0x15>;
+ wakeup-source;
+ };
+};
+
+&mipi_tx0 {
+ status = "okay";
+};
+
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_default>;
+ pinctrl-1 = <&mmc0_pins_uhs>;
+ bus-width = <8>;
+ max-frequency = <50000000>;
+ cap-mmc-highspeed;
+ mediatek,hs200-cmd-int-delay = <26>;
+ mediatek,hs400-cmd-int-delay = <14>;
+ mediatek,hs400-cmd-resp-sel-rising;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ cap-mmc-hw-reset;
+ hs400-ds-delay = <0x14015>;
+ vmmc-supply = <&mt6397_vemc_3v3_reg>;
+ vqmmc-supply = <&mt6397_vio18_reg>;
+ assigned-clocks = <&topckgen CLK_TOP_MSDC50_0_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_D2>;
+ non-removable;
+};
+
+&mmc1 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_uhs>;
+ bus-width = <4>;
+ max-frequency = <50000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ cd-gpios = <&pio 1 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&mt6397_vmch_reg>;
+ vqmmc-supply = <&mt6397_vmc_reg>;
+};
+
+&mmc3 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc3_pins_default>;
+ pinctrl-1 = <&mmc3_pins_uhs>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ sdr104-clk-delay = <5>;
+ keep-power-in-suspend;
+ enable-sdio-wakeup;
+ cap-sdio-irq;
+ vmmc-supply = <&sdio_fixed_3v3>;
+ vqmmc-supply = <&mt6397_vgp3_reg>;
+ non-removable;
+ cap-power-off-card;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ btmrvl: btmrvl@2 {
+ compatible = "marvell,sd8897-bt";
+ reg = <2>;
+ interrupt-parent = <&pio>;
+ interrupts = <119 IRQ_TYPE_LEVEL_LOW>;
+ marvell,wakeup-pin = /bits/ 16 <0x0d>;
+ marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
+ };
+
+ mwifiex: mwifiex@1 {
+ compatible = "marvell,sd8897";
+ reg = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <38 IRQ_TYPE_LEVEL_LOW>;
+ marvell,wakeup-pin = <3>;
+ marvell,caldata_ed_mac = /bits/ 8 <
+0x01 0x00 0x07 0x00 0x14 0x00 0x01 0x00 0x00 0x00 0x12 0x00 0x02 0x00 0x01 0x00
+0x01 0x00 0x00 0x00 0x12 0x00 0x02 0x00 0x01 0x00
+ >;
+ };
+};
+
+&nor_flash {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&nor_gpio1_pins>;
+ bus-width = <8>;
+ max-frequency = <50000000>;
+ non-removable;
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ };
+};
+
+&pio {
+ aud_i2s2: aud_i2s2 {
+ pins1 {
+ pinmux = <MT8173_PIN_128_I2S0_LRCK__FUNC_I2S1_WS>,
+ <MT8173_PIN_129_I2S0_BCK__FUNC_I2S1_BCK>,
+ <MT8173_PIN_130_I2S0_MCK__FUNC_I2S1_MCK>,
+ <MT8173_PIN_131_I2S0_DATA0__FUNC_I2S1_DO_1>,
+ <MT8173_PIN_116_UCTS0__FUNC_I2S2_DI_1>,
+ <MT8173_PIN_132_I2S0_DATA1__FUNC_I2S2_DI_2>;
+ bias-pull-down;
+ };
+ };
+
+ bl_fixed_pins: bl_fixed_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_32_UTXD2__FUNC_GPIO32>;
+ output-low;
+ };
+ };
+
+ bt_wake_pins: bt_wake_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_119_KPROW0__FUNC_GPIO119>;
+ bias-pull-up;
+ };
+ };
+
+ chromeos_write_protect: chromeos_write_protect {
+ pins1 {
+ pinmux = <MT8173_PIN_4_EINT4__FUNC_GPIO4>;
+ bias-disable;
+ };
+ };
+
+ disp_pwm0_pins: disp_pwm0_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_87_DISP_PWM0__FUNC_DISP_PWM0>;
+ output-low;
+ };
+ };
+
+ gpio_keys_pins: gpio_keys_pins {
+ volume_pins {
+ pinmux = <MT8173_PIN_123_KPCOL1__FUNC_GPIO123>,
+ <MT8173_PIN_124_KPCOL2__FUNC_GPIO124>;
+ bias-pull-up;
+ };
+
+ tablet_mode_pins {
+ pinmux = <MT8173_PIN_121_KPROW2__FUNC_GPIO121>;
+ bias-pull-up;
+ };
+ };
+
+ mmc0_pins_default: mmc0default {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+ <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+ <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+ <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+ <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+ <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+ <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+ <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+ <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+ bias-pull-up;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+ bias-pull-down;
+ };
+
+ pins_rst {
+ pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_pins_default: mmc1default {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+ <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+ <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+ <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+ <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+ bias-pull-down;
+ drive-strength = <MTK_DRIVE_4mA>;
+ };
+
+ pins_insert {
+ pinmux = <MT8173_PIN_1_EINT1__FUNC_GPIO1>;
+ bias-pull-up;
+ };
+ };
+
+ mmc3_pins_default: mmc3default {
+ pins_dat {
+ pinmux = <MT8173_PIN_22_MSDC3_DAT0__FUNC_MSDC3_DAT0>,
+ <MT8173_PIN_23_MSDC3_DAT1__FUNC_MSDC3_DAT1>,
+ <MT8173_PIN_24_MSDC3_DAT2__FUNC_MSDC3_DAT2>,
+ <MT8173_PIN_25_MSDC3_DAT3__FUNC_MSDC3_DAT3>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_cmd {
+ pinmux = <MT8173_PIN_27_MSDC3_CMD__FUNC_MSDC3_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_26_MSDC3_CLK__FUNC_MSDC3_CLK>;
+ bias-pull-down;
+ drive-strength = <MTK_DRIVE_8mA>;
+ };
+
+ pins_pdn {
+ pinmux = <MT8173_PIN_85_AUD_DAT_MOSI__FUNC_GPIO85>;
+ output-low;
+ };
+ };
+
+ mmc0_pins_uhs: mmc0 {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+ <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+ <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+ <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+ <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+ <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+ <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+ <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+ <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_6mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_6mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_ds {
+ pinmux = <MT8173_PIN_67_MSDC0_DSL__FUNC_MSDC0_DSL>;
+ drive-strength = <MTK_DRIVE_10mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_rst {
+ pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_pins_uhs: mmc1 {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+ <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+ <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+ <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+ <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_6mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+ };
+
+ mmc3_pins_uhs: mmc3 {
+ pins_dat {
+ pinmux = <MT8173_PIN_22_MSDC3_DAT0__FUNC_MSDC3_DAT0>,
+ <MT8173_PIN_23_MSDC3_DAT1__FUNC_MSDC3_DAT1>,
+ <MT8173_PIN_24_MSDC3_DAT2__FUNC_MSDC3_DAT2>,
+ <MT8173_PIN_25_MSDC3_DAT3__FUNC_MSDC3_DAT3>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_cmd {
+ pinmux = <MT8173_PIN_27_MSDC3_CMD__FUNC_MSDC3_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_26_MSDC3_CLK__FUNC_MSDC3_CLK>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+ };
+
+ nor_gpio1_pins: nor {
+ pins1 {
+ pinmux = <MT8173_PIN_6_EINT6__FUNC_SFCS0>,
+ <MT8173_PIN_7_EINT7__FUNC_SFHOLD>,
+ <MT8173_PIN_8_EINT8__FUNC_SFIN>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up;
+ };
+
+ pins2 {
+ pinmux = <MT8173_PIN_5_EINT5__FUNC_SFOUT>;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_9_EINT9__FUNC_SFCK>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up;
+ };
+ };
+
+ panel_fixed_pins: panel_fixed_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_41_CMMCLK__FUNC_GPIO41>;
+ };
+ };
+
+ ps8640_pins: ps8640_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_92_PCM_CLK__FUNC_GPIO92>,
+ <MT8173_PIN_115_URTS0__FUNC_GPIO115>,
+ <MT8173_PIN_127_LCM_RST__FUNC_GPIO127>;
+ };
+ };
+
+ ps8640_fixed_pins: ps8640_fixed_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_30_URTS2__FUNC_GPIO30>;
+ };
+ };
+
+ rt5650_irq: rt5650_irq {
+ pins1 {
+ pinmux = <MT8173_PIN_3_EINT3__FUNC_GPIO3>;
+ bias-pull-down;
+ };
+ };
+
+ spi_pins_a: spi1 {
+ pins1 {
+ pinmux = <MT8173_PIN_0_EINT0__FUNC_GPIO0>;
+ bias-pull-up;
+ };
+
+ pins_spi {
+ pinmux = <MT8173_PIN_102_MSDC2_DAT2__FUNC_SPI_CK_1_>,
+ <MT8173_PIN_103_MSDC2_DAT3__FUNC_SPI_MI_1_>,
+ <MT8173_PIN_104_MSDC2_CLK__FUNC_SPI_MO_1_>,
+ <MT8173_PIN_105_MSDC2_CMD__FUNC_SPI_CS_1_>;
+ bias-disable;
+ };
+ };
+
+ trackpad_irq: trackpad_irq {
+ pins1 {
+ pinmux = <MT8173_PIN_117_URXD3__FUNC_GPIO117>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+
+ usb_pins: usb@0 {
+ pins1 {
+ pinmux = <MT8173_PIN_101_MSDC2_DAT1__FUNC_GPIO101>;
+ output-high;
+ bias-disable;
+ };
+ };
+
+ usb_id_pins_float: usb_iddig_pull_up {
+ pins_iddig {
+ pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+ bias-pull-up;
+ };
+ };
+
+ usb_id_pins_ground: usb_iddig_pull_down {
+ pins_iddig {
+ pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+ bias-pull-down;
+ };
+ };
+
+ wifi_wake_pins: wifi_wake_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_38_CONN_RST__FUNC_GPIO38>;
+ bias-pull-up;
+ };
+ };
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwrap {
+ pmic: mt6397 {
+ compatible = "mediatek,mt6397";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ syscfg_pctl_pmic: syscfg_pctl_pmic@c000 {
+ compatible = "mediatek,mt6397-pctl-pmic-syscfg",
+ "syscon";
+ reg = <0 0x0000c000 0 0x0108>;
+ };
+
+ pio6397: pinctrl@c000 {
+ compatible = "mediatek,mt6397-pinctrl";
+ pins-are-numbered;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ mt6397clock: mt6397clock {
+ compatible = "mediatek,mt6397-clk";
+ #clock-cells = <1>;
+ };
+
+ mt6397regulator: mt6397regulator {
+ compatible = "mediatek,mt6397-regulator";
+
+ mt6397_vpca15_reg: buck_vpca15 {
+ regulator-compatible = "buck_vpca15";
+ regulator-name = "vpca15";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+/* regulator-supported-modes =
+ <REGULATOR_OPERATION_MODE_FAST
+ REGULATOR_OPERATION_MODE_NORMAL>;*/
+ };
+
+ mt6397_vpca7_reg: buck_vpca7 {
+ regulator-compatible = "buck_vpca7";
+ regulator-name = "vpca7";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ regulator-always-on;
+ };
+
+ mt6397_vsramca15_reg: buck_vsramca15 {
+ regulator-compatible = "buck_vsramca15";
+ regulator-name = "vsramca15";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vsramca7_reg: buck_vsramca7 {
+ regulator-compatible = "buck_vsramca7";
+ regulator-name = "vsramca7";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vcore_reg: buck_vcore {
+ regulator-compatible = "buck_vcore";
+ regulator-name = "vcore";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vgpu_reg: buck_vgpu {
+ regulator-compatible = "buck_vgpu";
+ regulator-name = "vgpu";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vdrm_reg: buck_vdrm {
+ regulator-compatible = "buck_vdrm";
+ regulator-name = "vdrm";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vio18_reg: buck_vio18 {
+ regulator-compatible = "buck_vio18";
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <1980000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vtcxo_reg: ldo_vtcxo {
+ regulator-compatible = "ldo_vtcxo";
+ regulator-name = "vtcxo";
+ regulator-always-on;
+ };
+
+ mt6397_va28_reg: ldo_va28 {
+ regulator-compatible = "ldo_va28";
+ regulator-name = "va28";
+ };
+
+ mt6397_vcama_reg: ldo_vcama {
+ regulator-compatible = "ldo_vcama";
+ regulator-name = "vcama";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vio28_reg: ldo_vio28 {
+ regulator-compatible = "ldo_vio28";
+ regulator-name = "vio28";
+ regulator-always-on;
+ };
+
+ mt6397_vusb_reg: ldo_vusb {
+ regulator-compatible = "ldo_vusb";
+ regulator-name = "vusb";
+ };
+
+ mt6397_vmc_reg: ldo_vmc {
+ regulator-compatible = "ldo_vmc";
+ regulator-name = "vmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vmch_reg: ldo_vmch {
+ regulator-compatible = "ldo_vmch";
+ regulator-name = "vmch";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ regulator-compatible = "ldo_vemc3v3";
+ regulator-name = "vemc_3v3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp1_reg: ldo_vgp1 {
+ regulator-compatible = "ldo_vgp1";
+ regulator-name = "vcamd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6397_vgp2_reg: ldo_vgp2 {
+ regulator-compatible = "ldo_vgp2";
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp3_reg: ldo_vgp3 {
+ regulator-compatible = "ldo_vgp3";
+ regulator-name = "vcamaf";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp4_reg: ldo_vgp4 {
+ regulator-compatible = "ldo_vgp4";
+ regulator-name = "vgp4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp5_reg: ldo_vgp5 {
+ regulator-compatible = "ldo_vgp5";
+ regulator-name = "vgp5";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp6_reg: ldo_vgp6 {
+ regulator-compatible = "ldo_vgp6";
+ regulator-name = "vgp6";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vibr_reg: ldo_vibr {
+ regulator-compatible = "ldo_vibr";
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+ };
+ };
+};
+
+&scpsys {
+ mfg_async-supply = <&da9211_vgpu_reg>;
+};
+
+&spi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_pins_a>;
+ mediatek,pad-select = <1>;
+ status = "okay";
+ /* clients */
+ cros_ec: ec@0 {
+ compatible = "google,cros-ec-spi";
+ reg = <0x0>;
+ spi-max-frequency = <12000000>;
+ interrupt-parent = <&pio>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ google,cros-ec-spi-msg-delay = <500>;
+
+ i2c_tunnel: i2c-tunnel@0 {
+ compatible = "google,cros-ec-i2c-tunnel";
+ google,remote-bus = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ battery: sbs-battery@b {
+ compatible = "sbs,sbs-battery";
+ reg = <0xb>;
+ sbs,i2c-retry-count = <2>;
+ sbs,poll-retry-count = <1>;
+ };
+ };
+ };
+};
+
+&thermal {
+ bank0-supply = <&mt6397_vpca15_reg>;
+ bank1-supply = <&da9211_vcpu_reg>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&ssusb {
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p0_vbus>;
+// extcon = <&extcon_usb>;
+ dr_mode = "host";
+ mediatek,enable-wakeup;
+ pinctrl-names = "default", "id_float", "id_ground";
+// pinctrl-0 = <&usb_pins>;
+ pinctrl-0 = <&usb_id_pins_float>;
+ pinctrl-1 = <&usb_id_pins_float>;
+ pinctrl-2 = <&usb_id_pins_ground>;
+ status = "okay";
+};
+
+&usb_host {
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p1_vbus>;
+ status = "okay";
+};
+
+#include "../../../../arm/boot/dts/cros-ec-keyboard.dtsi"
--
2.7.4
* [RFC 08/11] hack: drm/mediatek: roll back drm_of_find_panel_or_bridge() use
[not found] ` <1506690553-27357-1-git-send-email-ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
` (4 preceding siblings ...)
2017-09-29 13:09 ` [RFC 06/11] arm64: dts: mediatek: Add Elm Rev. 3 device tree Ulrich Hecht
@ 2017-09-29 13:09 ` Ulrich Hecht
2017-09-29 13:09 ` [RFC 10/11] hack: mtk-sd: use old driver from vendor kernel Ulrich Hecht
6 siblings, 0 replies; 17+ messages in thread
From: Ulrich Hecht @ 2017-09-29 13:09 UTC (permalink / raw)
To: magnus.damm-Re5JQEeQqe8AvxtiuMwx3w,
laurent.pinchart-ryLnwIuWjnjg/C1BVhZhaw,
jacopo-AW8dsiIh9cEdnm+yROfE0A
Cc: Ulrich Hecht, linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r
Haven't looked into how to do this properly.
Fixes display initialization on the Acer Chromebook R13.
Reverts the MediaTek part of commit ebc9446135671b89c2397f438af45d9cef0d1368
("drm: convert drivers to use drm_of_find_panel_or_bridge").
Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
drivers/gpu/drm/mediatek/mtk_dsi.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 7e5e24c..fd10cc4 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1093,6 +1093,7 @@ static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
+ struct device_node *remote_node, *endpoint;
struct resource *regs;
int irq_num;
int comp_id;
@@ -1105,10 +1106,22 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &dsi->panel, &dsi->bridge);
- if (ret)
- return ret;
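+ /*
+ * Open-coded equivalent of drm_of_find_panel_or_bridge(): walk the
+ * OF graph to the first remote endpoint and look up a registered
+ * bridge or panel for it, deferring probe if neither is bound yet.
+ */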
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ remote_node = of_graph_get_remote_port_parent(endpoint);
+ if (!remote_node) {
+ dev_err(dev, "No panel connected\n");
+ return -ENODEV;
+ }
+
+ dsi->bridge = of_drm_find_bridge(remote_node);
+ dsi->panel = of_drm_find_panel(remote_node);
+ of_node_put(remote_node);
+ if (!dsi->bridge && !dsi->panel) {
+ dev_info(dev, "Waiting for bridge or panel driver\n");
+ return -EPROBE_DEFER;
+ }
+ }
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
--
2.7.4
* [RFC 10/11] hack: mtk-sd: use old driver from vendor kernel
[not found] ` <1506690553-27357-1-git-send-email-ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
` (5 preceding siblings ...)
2017-09-29 13:09 ` [RFC 08/11] hack: drm/mediatek: roll back drm_of_find_panel_or_bridge() use Ulrich Hecht
@ 2017-09-29 13:09 ` Ulrich Hecht
6 siblings, 0 replies; 17+ messages in thread
From: Ulrich Hecht @ 2017-09-29 13:09 UTC (permalink / raw)
To: magnus.damm-Re5JQEeQqe8AvxtiuMwx3w,
laurent.pinchart-ryLnwIuWjnjg/C1BVhZhaw,
jacopo-AW8dsiIh9cEdnm+yROfE0A
Cc: Ulrich Hecht, linux-mediatek-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r
The mainline driver doesn't work on the R13; I haven't looked into this yet.
Signed-off-by: Ulrich Hecht <ulrich.hecht+renesas-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
drivers/mmc/host/mtk-sd.c | 465 +++++++++++++++++++++++-----------------------
1 file changed, 230 insertions(+), 235 deletions(-)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 267f7ab..1286256 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/of_address.h>
@@ -28,7 +29,6 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/interrupt.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
@@ -76,7 +76,6 @@
#define MSDC_PATCH_BIT1 0xb4
#define MSDC_PAD_TUNE 0xec
#define PAD_DS_TUNE 0x188
-#define PAD_CMD_TUNE 0x18c
#define EMMC50_CFG0 0x208
/*--------------------------------------------------------------------------*/
@@ -119,6 +118,7 @@
#define MSDC_PS_CDSTS (0x1 << 1) /* R */
#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
#define MSDC_PS_DAT (0xff << 16) /* R */
+#define MSDC_PS_DATA1 (0x1 << 17) /* R */
#define MSDC_PS_CMD (0x1 << 24) /* R */
#define MSDC_PS_WP (0x1 << 31) /* R */
@@ -212,17 +212,13 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
-#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
-#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
-#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
-#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
-#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
-
-#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
-#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
-#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
+#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
+#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
+#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
-#define PAD_CMD_TUNE_RX_DLY3 (0x1f << 1) /* RW */
+#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
+#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
+#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
@@ -291,14 +287,12 @@ struct msdc_save_para {
u32 patch_bit0;
u32 patch_bit1;
u32 pad_ds_tune;
- u32 pad_cmd_tune;
u32 emmc50_cfg0;
};
struct msdc_tune_para {
u32 iocon;
u32 pad_tune;
- u32 pad_cmd_tune;
};
struct msdc_delay_phase {
@@ -313,6 +307,7 @@ struct msdc_host {
int cmd_rsp;
spinlock_t lock;
+ spinlock_t irqlock;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
@@ -331,19 +326,18 @@ struct msdc_host {
struct pinctrl_state *pins_uhs;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ bool irq_thread_alive;
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
u32 sclk; /* SD/MS bus clock frequency */
+ bool clock_on;
unsigned char timing;
bool vqmmc_enabled;
u32 hs400_ds_delay;
- u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
- u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
- bool hs400_cmd_resp_sel_rising;
- /* cmd response sample selection for HS400 */
+ u32 sdr104_clk_delay;
bool hs400_mode; /* current eMMC will run at hs400 mode */
struct msdc_save_para save_para; /* used when gate HCLK */
struct msdc_tune_para def_tune_para; /* default tune setting */
@@ -400,6 +394,7 @@ static void msdc_reset_hw(struct msdc_host *host)
static void msdc_cmd_next(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd);
+static void msdc_recheck_sdio_irq(struct msdc_host *host);
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
@@ -474,9 +469,11 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
struct mmc_data *data = mrq->data;
if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
+ bool read = (data->flags & MMC_DATA_READ) != 0;
+
data->host_cookie |= MSDC_PREPARE_FLAG;
data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
}
@@ -488,8 +485,10 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
return;
if (data->host_cookie & MSDC_PREPARE_FLAG) {
+ bool read = (data->flags & MMC_DATA_READ) != 0;
+
dma_unmap_sg(host->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
data->host_cookie &= ~MSDC_PREPARE_FLAG;
}
}
@@ -522,6 +521,7 @@ static void msdc_gate_clock(struct msdc_host *host)
{
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->h_clk);
+ host->clock_on = false;
}
static void msdc_ungate_clock(struct msdc_host *host)
@@ -530,6 +530,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
clk_prepare_enable(host->src_clk);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
+ host->clock_on = true;
}
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
@@ -538,6 +539,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 flags;
u32 div;
u32 sclk;
+ unsigned long irq_flags;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -546,8 +548,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
return;
}
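+ /* Save and mask MSDC_INTEN under irqlock while the clock changes. */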
+ spin_lock_irqsave(&host->irqlock, irq_flags);
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
+ spin_unlock_irqrestore(&host->irqlock, irq_flags);
+
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
if (timing == MMC_TIMING_UHS_DDR50 ||
timing == MMC_TIMING_MMC_DDR52 ||
@@ -588,7 +593,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
}
sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | div);
+ (mode << 8) | (div % 0xff));
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
@@ -597,7 +602,10 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
host->timing = timing;
/* need because clk changed. */
msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
+
+ spin_lock_irqsave(&host->irqlock, irq_flags);
sdr_set_bits(host->base + MSDC_INTEN, flags);
+ spin_unlock_irqrestore(&host->irqlock, irq_flags);
/*
* mmc_select_hs400() will drop to 50Mhz and High speed mode,
@@ -609,14 +617,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
- writel(host->saved_tune_para.pad_cmd_tune,
- host->base + PAD_CMD_TUNE);
}
- if (timing == MMC_TIMING_MMC_HS400)
- sdr_set_field(host->base + PAD_CMD_TUNE,
- MSDC_PAD_TUNE_CMDRRDLY,
- host->hs400_cmd_int_delay);
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
}
@@ -661,7 +663,8 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
host->cmd_rsp = resp;
- if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
+ if ((opcode == SD_IO_RW_DIRECT &&
+ ((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT) ||
opcode == MMC_STOP_TRANSMISSION)
rawcmd |= (0x1 << 14);
else if (opcode == SD_SWITCH_VOLTAGE)
@@ -705,6 +708,7 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
struct mmc_command *cmd, struct mmc_data *data)
{
+ unsigned long flags;
bool read;
WARN_ON(host->data);
@@ -713,8 +717,12 @@ static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
msdc_dma_setup(host, &host->dma, data);
+
+ spin_lock_irqsave(&host->irqlock, flags);
sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
+ spin_unlock_irqrestore(&host->irqlock, flags);
+
dev_dbg(host->dev, "DMA start\n");
dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
__func__, cmd->opcode, data->blocks, read);
@@ -771,6 +779,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
if (mrq->data)
msdc_unprepare_data(host, mrq);
mmc_request_done(host->mmc, mrq);
+ msdc_recheck_sdio_irq(host);
}
/* returns true if command is fully handled; returns false otherwise */
@@ -794,15 +803,17 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
| MSDC_INT_CMDTMO)))
return done;
- spin_lock_irqsave(&host->lock, flags);
done = !host->cmd;
+ spin_lock_irqsave(&host->lock, flags);
host->cmd = NULL;
spin_unlock_irqrestore(&host->lock, flags);
if (done)
return true;
+ spin_lock_irqsave(&host->irqlock, flags);
sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
+ spin_unlock_irqrestore(&host->irqlock, flags);
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
@@ -841,6 +852,15 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
return true;
}
+static int msdc_card_busy(struct mmc_host *mmc)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ u32 status = readl(host->base + MSDC_PS);
+
+ /* check if data0 is low */
+ return !(status & BIT(16));
+}
+
/* It is the core layer's responsibility to ensure card status
* is correct before issue a request. but host design do below
* checks recommended.
@@ -850,10 +870,20 @@ static inline bool msdc_cmd_is_ready(struct msdc_host *host,
{
/* The max busy time we can endure is 20ms */
unsigned long tmo = jiffies + msecs_to_jiffies(20);
+ u32 count = 0;
+
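+ /*
+ * Atomic context must not sleep: busy-wait with udelay() for at
+ * most ~1 ms there, otherwise spin until the 20 ms timeout above.
+ */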
+ if (in_interrupt()) {
+ while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
+ (count < 1000)) {
+ udelay(1);
+ count++;
+ }
+ } else {
+ while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
+ time_before(jiffies, tmo))
+ cpu_relax();
+ }
- while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
- time_before(jiffies, tmo))
- cpu_relax();
if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) {
dev_err(host->dev, "CMD bus busy detected\n");
host->error |= REQ_CMD_BUSY;
@@ -861,17 +891,35 @@ static inline bool msdc_cmd_is_ready(struct msdc_host *host,
return false;
}
- if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
- tmo = jiffies + msecs_to_jiffies(20);
- /* R1B or with data, should check SDCBUSY */
- while ((readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) &&
- time_before(jiffies, tmo))
- cpu_relax();
- if (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) {
- dev_err(host->dev, "Controller busy detected\n");
- host->error |= REQ_CMD_BUSY;
- msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
- return false;
+ if (cmd->opcode != MMC_SEND_STATUS) {
+ count = 0;
+ /* Consider that CMD6 crc error before card was init done,
+ * mmc_retune() will return directly as host->card is null.
+ * and CMD6 will retry 3 times, must ensure card is in transfer
+ * state when retry.
+ */
+ tmo = jiffies + msecs_to_jiffies(60 * 1000);
+ while (1) {
+ if (msdc_card_busy(host->mmc)) {
+ if (in_interrupt()) {
+ udelay(1);
+ count++;
+ } else {
+ msleep_interruptible(10);
+ }
+ } else {
+ break;
+ }
+ /* Timeout if the device never
+ * leaves the program state.
+ */
+ if (count > 1000 || time_after(jiffies, tmo)) {
+ pr_err("%s: Card stuck in programming state! %s\n",
+ mmc_hostname(host->mmc), __func__);
+ host->error |= REQ_CMD_BUSY;
+ msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
+ return false;
+ }
}
}
return true;
@@ -880,6 +928,7 @@ static inline bool msdc_cmd_is_ready(struct msdc_host *host,
static void msdc_start_command(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
+ unsigned long flags;
u32 rawcmd;
WARN_ON(host->cmd);
@@ -898,7 +947,10 @@ static void msdc_start_command(struct msdc_host *host,
rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
+ spin_lock_irqsave(&host->irqlock, flags);
sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
+ spin_unlock_irqrestore(&host->irqlock, flags);
+
writel(cmd->arg, host->base + SDC_ARG);
writel(rawcmd, host->base + SDC_CMD);
}
@@ -990,8 +1042,8 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
| MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
| MSDC_INT_DMA_PROTECT);
- spin_lock_irqsave(&host->lock, flags);
done = !host->data;
+ spin_lock_irqsave(&host->lock, flags);
if (check_data)
host->data = NULL;
spin_unlock_irqrestore(&host->lock, flags);
@@ -1002,11 +1054,14 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
if (check_data || (stop && stop->error)) {
dev_dbg(host->dev, "DMA status: 0x%8X\n",
readl(host->base + MSDC_DMA_CFG));
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
- 1);
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
cpu_relax();
+
+ spin_lock_irqsave(&host->irqlock, flags);
sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
+ spin_unlock_irqrestore(&host->irqlock, flags);
+
dev_dbg(host->dev, "DMA stop\n");
if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
@@ -1084,15 +1139,6 @@ static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
return ret;
}
-static int msdc_card_busy(struct mmc_host *mmc)
-{
- struct msdc_host *host = mmc_priv(mmc);
- u32 status = readl(host->base + MSDC_PS);
-
- /* only check if data0 is low */
- return !(status & BIT(16));
-}
-
static void msdc_request_timeout(struct work_struct *work)
{
struct msdc_host *host = container_of(work, struct msdc_host,
@@ -1104,14 +1150,16 @@ static void msdc_request_timeout(struct work_struct *work)
dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
host->mrq, host->mrq->cmd->opcode);
if (host->cmd) {
- dev_err(host->dev, "%s: aborting cmd=%d\n",
- __func__, host->cmd->opcode);
+ dev_err(host->dev,
+ "%s: aborting cmd=%d, arg=0x%x\n", __func__,
+ host->cmd->opcode, host->cmd->arg);
msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
host->cmd);
} else if (host->data) {
- dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n",
- __func__, host->mrq->cmd->opcode,
- host->data->blocks);
+ dev_err(host->dev,
+ "%s: aborting data: cmd%d; %d blocks\n",
+ __func__, host->mrq->cmd->opcode,
+ host->data->blocks);
msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
host->data);
}
@@ -1120,44 +1168,47 @@ static void msdc_request_timeout(struct work_struct *work)
static irqreturn_t msdc_irq(int irq, void *dev_id)
{
+ unsigned long flags;
struct msdc_host *host = (struct msdc_host *) dev_id;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ u32 events, event_mask;
+
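+ /*
+ * Snapshot and ack the pending events in one step under irqlock,
+ * then handle them with the lock released.
+ */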
+ spin_lock_irqsave(&host->irqlock, flags);
+ events = readl(host->base + MSDC_INT);
+ event_mask = readl(host->base + MSDC_INTEN);
+ /* clear interrupts */
+ writel(events & event_mask, host->base + MSDC_INT);
+
+ mrq = host->mrq;
+ cmd = host->cmd;
+ data = host->data;
+ spin_unlock_irqrestore(&host->irqlock, flags);
+
+ if ((events & event_mask)& MSDC_INT_SDIOIRQ) {
+ mmc_signal_sdio_irq(host->mmc);
+ if (!mrq)
+ return IRQ_HANDLED;
+ }
- while (true) {
- unsigned long flags;
- struct mmc_request *mrq;
- struct mmc_command *cmd;
- struct mmc_data *data;
- u32 events, event_mask;
-
- spin_lock_irqsave(&host->lock, flags);
- events = readl(host->base + MSDC_INT);
- event_mask = readl(host->base + MSDC_INTEN);
- /* clear interrupts */
- writel(events & event_mask, host->base + MSDC_INT);
-
- mrq = host->mrq;
- cmd = host->cmd;
- data = host->data;
- spin_unlock_irqrestore(&host->lock, flags);
-
- if (!(events & event_mask))
- break;
+ if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
+ return IRQ_HANDLED;
- if (!mrq) {
- dev_err(host->dev,
+ if (!mrq) {
+ dev_err(host->dev,
"%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
__func__, events, event_mask);
- WARN_ON(1);
- break;
- }
+ WARN_ON(1);
+ return IRQ_HANDLED;
+ }
- dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);
+ dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);
- if (cmd)
- msdc_cmd_done(host, events, mrq, cmd);
- else if (data)
- msdc_data_xfer_done(host, events, mrq, data);
- }
+ if (cmd)
+ msdc_cmd_done(host, events, mrq, cmd);
+ else if (data)
+ msdc_data_xfer_done(host, events, mrq, data);
return IRQ_HANDLED;
}
@@ -1165,6 +1216,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
+ unsigned long flags;
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -1176,11 +1228,14 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
/* Disable and clear all interrupts */
+ spin_lock_irqsave(&host->irqlock, flags);
writel(0, host->base + MSDC_INTEN);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
+ spin_unlock_irqrestore(&host->irqlock, flags);
- writel(0, host->base + MSDC_PAD_TUNE);
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_CLKTDLY, host->sdr104_clk_delay);
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
@@ -1193,9 +1248,11 @@ static void msdc_init_hw(struct msdc_host *host)
*/
sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
- /* disable detect SDIO device interrupt function */
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
-
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+ else
+ /* disable detect SDIO device interrupt function */
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
/* Configure to default data timeout */
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
@@ -1207,11 +1264,15 @@ static void msdc_init_hw(struct msdc_host *host)
static void msdc_deinit_hw(struct msdc_host *host)
{
u32 val;
+ unsigned long flags;
+
/* Disable and clear all interrupts */
+ spin_lock_irqsave(&host->irqlock, flags);
writel(0, host->base + MSDC_INTEN);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
+ spin_unlock_irqrestore(&host->irqlock, flags);
}
/* init gpd and bd list in msdc_drv_probe */
@@ -1278,6 +1339,7 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->mclk != ios->clock || host->timing != ios->timing)
msdc_set_mclk(host, ios->timing, ios->clock);
+
}
static u32 test_delay_bit(u32 delay, u32 bit)
@@ -1317,7 +1379,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
len_final = len;
}
start += len ? len : 1;
- if (len >= 12 && start_final < 4)
+ if (len >= 8 && start_final < 4)
break;
}
@@ -1340,67 +1402,36 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
- struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
- u32 internal_delay = 0;
int cmd_err;
- int i, j;
-
- if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
- mmc->ios.timing == MMC_TIMING_UHS_SDR104)
- sdr_set_field(host->base + MSDC_PAD_TUNE,
- MSDC_PAD_TUNE_CMDRRDLY,
- host->hs200_cmd_int_delay);
+ int i;
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
sdr_set_field(host->base + MSDC_PAD_TUNE,
MSDC_PAD_TUNE_CMDRDLY, i);
- /*
- * Using the same parameters, it may sometimes pass the test,
- * but sometimes it may fail. To make sure the parameters are
- * more stable, we test each set of parameters 3 times.
- */
- for (j = 0; j < 3; j++) {
- mmc_send_tuning(mmc, opcode, &cmd_err);
- if (!cmd_err) {
- rise_delay |= (1 << i);
- } else {
- rise_delay &= ~(1 << i);
- break;
- }
- }
+ mmc_send_tuning(mmc, opcode, &cmd_err);
+ if (!cmd_err)
+ rise_delay |= (1 << i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
- if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4)
+ if (final_rise_delay.maxlen >= 10 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
sdr_set_field(host->base + MSDC_PAD_TUNE,
MSDC_PAD_TUNE_CMDRDLY, i);
- /*
- * Using the same parameters, it may sometimes pass the test,
- * but sometimes it may fail. To make sure the parameters are
- * more stable, we test each set of parameters 3 times.
- */
- for (j = 0; j < 3; j++) {
- mmc_send_tuning(mmc, opcode, &cmd_err);
- if (!cmd_err) {
- fall_delay |= (1 << i);
- } else {
- fall_delay &= ~(1 << i);
- break;
- }
- }
+ mmc_send_tuning(mmc, opcode, &cmd_err);
+ if (!cmd_err)
+ fall_delay |= (1 << i);
}
final_fall_delay = get_best_delay(host, fall_delay);
skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
- if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4)
- final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
@@ -1412,71 +1443,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
- if (host->hs200_cmd_int_delay)
- goto skip_internal;
- for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
- MSDC_PAD_TUNE_CMDRRDLY, i);
- mmc_send_tuning(mmc, opcode, &cmd_err);
- if (!cmd_err)
- internal_delay |= (1 << i);
- }
- dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
- internal_delay_phase = get_best_delay(host, internal_delay);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
- internal_delay_phase.final_phase);
-skip_internal:
- dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
- return final_delay == 0xff ? -EIO : 0;
-}
-
-static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
-{
- struct msdc_host *host = mmc_priv(mmc);
- u32 cmd_delay = 0;
- struct msdc_delay_phase final_cmd_delay = { 0,};
- u8 final_delay;
- int cmd_err;
- int i, j;
-
- /* select EMMC50 PAD CMD tune */
- sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
-
- if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
- mmc->ios.timing == MMC_TIMING_UHS_SDR104)
- sdr_set_field(host->base + MSDC_PAD_TUNE,
- MSDC_PAD_TUNE_CMDRRDLY,
- host->hs200_cmd_int_delay);
-
- if (host->hs400_cmd_resp_sel_rising)
- sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- else
- sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + PAD_CMD_TUNE,
- PAD_CMD_TUNE_RX_DLY3, i);
- /*
- * Using the same parameters, it may sometimes pass the test,
- * but sometimes it may fail. To make sure the parameters are
- * more stable, we test each set of parameters 3 times.
- */
- for (j = 0; j < 3; j++) {
- mmc_send_tuning(mmc, opcode, &cmd_err);
- if (!cmd_err) {
- cmd_delay |= (1 << i);
- } else {
- cmd_delay &= ~(1 << i);
- break;
- }
- }
- }
- final_cmd_delay = get_best_delay(host, cmd_delay);
- sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3,
- final_cmd_delay.final_phase);
- final_delay = final_cmd_delay.final_phase;
-
- dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
return final_delay == 0xff ? -EIO : 0;
}
@@ -1499,7 +1466,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
- if (final_rise_delay.maxlen >= 12 ||
+ if (final_rise_delay.maxlen >= 10 ||
(final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
@@ -1532,7 +1499,6 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
final_delay = final_fall_delay.final_phase;
}
- dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
return final_delay == 0xff ? -EIO : 0;
}
@@ -1541,13 +1507,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct msdc_host *host = mmc_priv(mmc);
int ret;
- if (host->hs400_mode)
- ret = hs400_tune_response(mmc, opcode);
- else
- ret = msdc_tune_response(mmc, opcode);
+ ret = msdc_tune_response(mmc, opcode);
if (ret == -EIO) {
dev_err(host->dev, "Tune response fail!\n");
- return ret;
+ goto out;
}
if (host->hs400_mode == false) {
ret = msdc_tune_data(mmc, opcode);
@@ -1557,7 +1520,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
- host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
+out:
return ret;
}
@@ -1579,39 +1542,64 @@ static void msdc_hw_reset(struct mmc_host *mmc)
sdr_clr_bits(host->base + EMMC_IOCON, 1);
}
-static const struct mmc_host_ops mt_msdc_ops = {
+/**
+ * msdc_recheck_sdio_irq - recheck whether an SDIO IRQ was lost
+ * @host: The host to check.
+ *
+ * The host controller may lose an interrupt in some special cases.
+ * Recheck the SDIO IRQ status so that every pending interrupt is
+ * processed promptly.
+ */
+static void msdc_recheck_sdio_irq(struct msdc_host *host)
+{
+ u32 reg_int, reg_ps;
+
+ if (host->clock_on && (host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
+ host->irq_thread_alive) {
+ reg_int = readl(host->base + MSDC_INT);
+ reg_ps = readl(host->base + MSDC_PS);
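+ /*
+ * An SDIO card keeps DAT1 low while it asserts an interrupt; if DAT1
+ * is low but no SDIOIRQ status bit was latched, the interrupt was
+ * lost, so resignal it to the core.
+ */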
+ if (!((reg_int & MSDC_INT_SDIOIRQ) || (reg_ps & MSDC_PS_DATA1)))
+ mmc_signal_sdio_irq(host->mmc);
+ }
+}
+
+static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ unsigned long flags;
+ struct msdc_host *host = mmc_priv(mmc);
+
+ host->irq_thread_alive = true;
+ if (enable) {
+ pm_runtime_get_sync(host->dev);
+ msdc_recheck_sdio_irq(host);
+
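+ /* irqlock serializes MSDC_INTEN/SDC_CFG updates against the IRQ handler */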
+ spin_lock_irqsave(&host->irqlock, flags);
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+ sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
+ spin_unlock_irqrestore(&host->irqlock, flags);
+ } else {
+ spin_lock_irqsave(&host->irqlock, flags);
+ sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
+ spin_unlock_irqrestore(&host->irqlock, flags);
+ }
+}
+
+static const struct mmc_host_ops mt_msdc_ops = {
.post_req = msdc_post_req,
.pre_req = msdc_pre_req,
.request = msdc_ops_request,
.set_ios = msdc_ops_set_ios,
.get_ro = mmc_gpio_get_ro,
- .get_cd = mmc_gpio_get_cd,
.start_signal_voltage_switch = msdc_ops_switch_volt,
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
.hw_reset = msdc_hw_reset,
+ .enable_sdio_irq = msdc_enable_sdio_irq,
};
-static void msdc_of_property_parse(struct platform_device *pdev,
- struct msdc_host *host)
-{
- of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
- &host->hs400_ds_delay);
-
- of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
- &host->hs200_cmd_int_delay);
-
- of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay",
- &host->hs400_cmd_int_delay);
-
- if (of_property_read_bool(pdev->dev.of_node,
- "mediatek,hs400-cmd-resp-sel-rising"))
- host->hs400_cmd_resp_sel_rising = true;
- else
- host->hs400_cmd_resp_sel_rising = false;
-}
-
static int msdc_drv_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
@@ -1683,14 +1671,22 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
- msdc_of_property_parse(pdev, host);
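+ /* optional DT tuning properties; the registers keep their defaults when a property is absent */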
+ if (!of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
+ &host->hs400_ds_delay))
+ dev_dbg(&pdev->dev, "hs400-ds-delay: %x\n",
+ host->hs400_ds_delay);
+
+ if (!of_property_read_u32(pdev->dev.of_node, "sdr104-clk-delay",
+ &host->sdr104_clk_delay))
+ dev_dbg(&pdev->dev, "sdr104-clk-delay: %x\n",
+ host->sdr104_clk_delay);
host->dev = &pdev->dev;
host->mmc = mmc;
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ mmc->f_min = host->src_clk_freq / (4 * 255);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
@@ -1703,6 +1699,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc_dev(mmc)->dma_mask = &host->dma_mask;
host->timeout_clks = 3 * 1048576;
+ host->irq_thread_alive = false;
host->dma.gpd = dma_alloc_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
&host->dma.gpd_addr, GFP_KERNEL);
@@ -1716,6 +1713,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_init_gpd_bd(host, &host->dma);
INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
spin_lock_init(&host->lock);
+ spin_lock_init(&host->irqlock);
platform_set_drvdata(pdev, mmc);
msdc_ungate_clock(host);
@@ -1775,7 +1773,7 @@ static int msdc_drv_remove(struct platform_device *pdev)
pm_runtime_disable(host->dev);
pm_runtime_put_noidle(host->dev);
dma_free_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
host->dma.gpd, host->dma.gpd_addr);
dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
host->dma.bd, host->dma.bd_addr);
@@ -1795,7 +1793,6 @@ static void msdc_save_reg(struct msdc_host *host)
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
- host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
}
@@ -1808,7 +1805,6 @@ static void msdc_restore_reg(struct msdc_host *host)
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
- writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
}
@@ -1843,7 +1839,6 @@ static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt8135-mmc", },
{}
};
-MODULE_DEVICE_TABLE(of, msdc_of_ids);
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
--
2.7.4