From: Hao Xiang <hao.xiang@bytedance.com>
To: quintela@redhat.com, peterx@redhat.com,
marcandre.lureau@redhat.com, bryan.zhang@bytedance.com,
qemu-devel@nongnu.org
Cc: Hao Xiang <hao.xiang@bytedance.com>
Subject: [PATCH 06/16] util/dsa: Implement DSA task asynchronous completion thread model.
Date: Wed, 25 Oct 2023 19:38:12 +0000
Message-ID: <20231025193822.2813204-7-hao.xiang@bytedance.com>
In-Reply-To: <20231025193822.2813204-1-hao.xiang@bytedance.com>
* Create a dedicated thread for DSA task completion.
* The DSA completion thread runs a loop and polls for completed tasks.
* Start and stop the DSA completion thread during DSA device start/stop.
A user space application can submit tasks directly to the Intel DSA
accelerator by writing to the DSA device's memory (mapped into user
space). Once a task is submitted, the device starts processing it and
writes the completion status back to the task. A user space application
can poll the task's completion status to check for completion. This
change uses a dedicated thread to perform DSA task completion checking.
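For illustration, the core of this user-space polling pattern is
sketched below. This is not part of the patch: wait_for_dsa_completion()
is a hypothetical helper, the field and status names come from the idxd
UAPI header <linux/idxd.h> already used by this series, and retry limits
plus page-fault handling are omitted (see poll_completion() in the diff
for the real logic).

    /*
     * Sketch only: spin on the completion record that the DSA device
     * updates in place once the descriptor has been submitted.
     */
    #include <linux/idxd.h>      /* struct dsa_completion_record, DSA_COMP_* */
    #include <emmintrin.h>       /* _mm_pause() */
    #include <stdbool.h>

    static bool
    wait_for_dsa_completion(volatile struct dsa_completion_record *comp)
    {
        /* DSA_COMP_NONE means the device has not written a status back yet. */
        while (comp->status == DSA_COMP_NONE) {
            _mm_pause();         /* ease pressure on the core while spinning */
        }
        return comp->status == DSA_COMP_SUCCESS;
    }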
Signed-off-by: Hao Xiang <hao.xiang@bytedance.com>
---
util/dsa.c | 243 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 242 insertions(+), 1 deletion(-)
diff --git a/util/dsa.c b/util/dsa.c
index f82282ce99..0e68013ffb 100644
--- a/util/dsa.c
+++ b/util/dsa.c
@@ -44,6 +44,7 @@
#define DSA_WQ_SIZE 4096
#define MAX_DSA_DEVICES 16
+#define DSA_COMPLETION_THREAD "dsa_completion"
typedef QSIMPLEQ_HEAD(dsa_task_queue, buffer_zero_batch_task) dsa_task_queue;
@@ -61,8 +62,18 @@ struct dsa_device_group {
dsa_task_queue task_queue;
};
+struct dsa_completion_thread {
+    bool stopping;
+    bool running;
+    QemuThread thread;
+    int thread_id;
+    QemuSemaphore sem_init_done;
+    struct dsa_device_group *group;
+};
+
uint64_t max_retry_count;
static struct dsa_device_group dsa_group;
+static struct dsa_completion_thread completion_thread;
/**
@@ -439,6 +450,234 @@ submit_batch_wi_async(struct buffer_zero_batch_task *batch_task)
return dsa_task_enqueue(device_group, batch_task);
}
+/**
+ * @brief Poll for the DSA work item completion.
+ *
+ * @param completion A pointer to the DSA work item completion record.
+ * @param opcode The DSA opcode.
+ *
+ * @return Zero if successful, non-zero otherwise.
+ */
+static int
+poll_completion(struct dsa_completion_record *completion,
+                enum dsa_opcode opcode)
+{
+    uint8_t status;
+    uint64_t retry = 0;
+
+    while (true) {
+        // The DSA operation completed, either successfully or with a failure.
+        status = completion->status;
+        if (status == DSA_COMP_SUCCESS ||
+            status == DSA_COMP_PAGE_FAULT_NOBOF ||
+            status == DSA_COMP_BATCH_PAGE_FAULT ||
+            status == DSA_COMP_BATCH_FAIL) {
+            break;
+        } else if (status != DSA_COMP_NONE) {
+            /* TODO: Error handling here on unexpected failure. */
+            fprintf(stderr, "DSA opcode %d failed with status = %d.\n",
+                    opcode, status);
+            exit(1);
+        }
+        retry++;
+        if (retry > max_retry_count) {
+            fprintf(stderr, "Waiting for DSA completion exceeded %lu retries.\n", retry);
+            exit(1);
+        }
+        _mm_pause();
+    }
+
+    return 0;
+}
+
+/**
+ * @brief Complete a single DSA task in the batch task.
+ *
+ * @param task A pointer to the batch task structure.
+ */
+static void
+poll_task_completion(struct buffer_zero_batch_task *task)
+{
+    assert(task->task_type == DSA_TASK);
+
+    struct dsa_completion_record *completion = &task->completions[0];
+    uint8_t status;
+
+    poll_completion(completion, task->descriptors[0].opcode);
+
+    status = completion->status;
+    if (status == DSA_COMP_SUCCESS) {
+        task->results[0] = (completion->result == 0);
+        return;
+    }
+
+    assert(status == DSA_COMP_PAGE_FAULT_NOBOF);
+}
+
+/**
+ * @brief Poll a batch task's status until it completes. If a DSA task doesn't
+ *        complete properly, use the CPU to complete the task.
+ *
+ * @param batch_task A pointer to the DSA batch task.
+ */
+static void
+poll_batch_task_completion(struct buffer_zero_batch_task *batch_task)
+{
+    struct dsa_completion_record *batch_completion = &batch_task->batch_completion;
+    struct dsa_completion_record *completion;
+    uint8_t batch_status;
+    uint8_t status;
+    bool *results = batch_task->results;
+    uint32_t count = batch_task->batch_descriptor.desc_count;
+
+    poll_completion(batch_completion,
+                    batch_task->batch_descriptor.opcode);
+
+    batch_status = batch_completion->status;
+
+    if (batch_status == DSA_COMP_SUCCESS) {
+        if (batch_completion->bytes_completed == count) {
+            // Let's skip checking each descriptor's completion status
+            // if the batch descriptor says all succeeded.
+            for (int i = 0; i < count; i++) {
+                assert(batch_task->completions[i].status == DSA_COMP_SUCCESS);
+                results[i] = (batch_task->completions[i].result == 0);
+            }
+            return;
+        }
+    } else {
+        assert(batch_status == DSA_COMP_BATCH_FAIL ||
+               batch_status == DSA_COMP_BATCH_PAGE_FAULT);
+    }
+
+    for (int i = 0; i < count; i++) {
+
+        completion = &batch_task->completions[i];
+        status = completion->status;
+
+        if (status == DSA_COMP_SUCCESS) {
+            results[i] = (completion->result == 0);
+            continue;
+        }
+
+        if (status != DSA_COMP_PAGE_FAULT_NOBOF) {
+            fprintf(stderr,
+                    "Unexpected completion status = %u.\n", status);
+            assert(false);
+        }
+    }
+}
+
+/**
+ * @brief Handles an asynchronous DSA batch task completion.
+ *
+ * @param batch_task A pointer to the batch buffer zero task structure.
+ */
+static void
+dsa_batch_task_complete(struct buffer_zero_batch_task *batch_task)
+{
+    batch_task->status = DSA_TASK_COMPLETION;
+    batch_task->completion_callback(batch_task);
+}
+
+/**
+ * @brief The function entry point called by a dedicated DSA
+ * work item completion thread.
+ *
+ * @param opaque A pointer to the thread context.
+ *
+ * @return void* Not used.
+ */
+static void *
+dsa_completion_loop(void *opaque)
+{
+    struct dsa_completion_thread *thread_context =
+        (struct dsa_completion_thread *)opaque;
+    struct buffer_zero_batch_task *batch_task;
+    struct dsa_device_group *group = thread_context->group;
+
+    rcu_register_thread();
+
+    thread_context->thread_id = qemu_get_thread_id();
+    qemu_sem_post(&thread_context->sem_init_done);
+
+    while (thread_context->running) {
+        batch_task = dsa_task_dequeue(group);
+        assert(batch_task != NULL || !group->running);
+        if (!group->running) {
+            assert(!thread_context->running);
+            break;
+        }
+        if (batch_task->task_type == DSA_TASK) {
+            poll_task_completion(batch_task);
+        } else {
+            assert(batch_task->task_type == DSA_BATCH_TASK);
+            poll_batch_task_completion(batch_task);
+        }
+
+        dsa_batch_task_complete(batch_task);
+    }
+
+    rcu_unregister_thread();
+    return NULL;
+}
+
+/**
+ * @brief Initializes a DSA completion thread.
+ *
+ * @param completion_thread A pointer to the completion thread context.
+ * @param group A pointer to the DSA device group.
+ */
+static void
+dsa_completion_thread_init(
+    struct dsa_completion_thread *completion_thread,
+    struct dsa_device_group *group)
+{
+    completion_thread->stopping = false;
+    completion_thread->running = true;
+    completion_thread->thread_id = -1;
+    qemu_sem_init(&completion_thread->sem_init_done, 0);
+    completion_thread->group = group;
+
+    qemu_thread_create(&completion_thread->thread,
+                       DSA_COMPLETION_THREAD,
+                       dsa_completion_loop,
+                       completion_thread,
+                       QEMU_THREAD_JOINABLE);
+
+    /* Wait for initialization to complete */
+    while (completion_thread->thread_id == -1) {
+        qemu_sem_wait(&completion_thread->sem_init_done);
+    }
+}
+
+/**
+ * @brief Stops the completion thread (and implicitly, the device group).
+ *
+ * @param opaque A pointer to the completion thread.
+ */
+static void dsa_completion_thread_stop(void *opaque)
+{
+    struct dsa_completion_thread *thread_context =
+        (struct dsa_completion_thread *)opaque;
+
+    struct dsa_device_group *group = thread_context->group;
+
+    qemu_mutex_lock(&group->task_queue_lock);
+
+    thread_context->stopping = true;
+    thread_context->running = false;
+
+    dsa_device_group_stop(group);
+
+    qemu_cond_signal(&group->task_queue_cond);
+    qemu_mutex_unlock(&group->task_queue_lock);
+
+    qemu_thread_join(&thread_context->thread);
+
+    qemu_sem_destroy(&thread_context->sem_init_done);
+}
+
/**
* @brief Check if DSA is running.
*
@@ -446,7 +685,7 @@ submit_batch_wi_async(struct buffer_zero_batch_task *batch_task)
*/
bool dsa_is_running(void)
{
-    return false;
+    return completion_thread.running;
}
static void
@@ -481,6 +720,7 @@ void dsa_start(void)
return;
}
dsa_device_group_start(&dsa_group);
+    dsa_completion_thread_init(&completion_thread, &dsa_group);
}
/**
@@ -496,6 +736,7 @@ void dsa_stop(void)
return;
}
+    dsa_completion_thread_stop(&completion_thread);
dsa_empty_task_queue(group);
}
--
2.30.2
Thread overview: 30+ messages
2023-10-25 19:38 [PATCH 00/16] Use Intel DSA accelerator to offload zero page checking in multifd live migration Hao Xiang
2023-10-25 19:38 ` [PATCH 01/16] Cherry pick a set of patches that enables multifd zero page feature Hao Xiang
2023-10-27 12:30 ` Fabiano Rosas
2023-10-27 13:21 ` Peter Maydell
2023-10-28 1:13 ` [External] " Hao Xiang
2023-10-28 1:06 ` Hao Xiang
2023-10-30 13:58 ` Fabiano Rosas
2023-11-06 18:53 ` Hao Xiang
2023-10-25 19:38 ` [PATCH 02/16] meson: Introduce new instruction set enqcmd to the build system Hao Xiang
2023-10-25 19:38 ` [PATCH 03/16] util/dsa: Add dependency idxd Hao Xiang
2023-10-25 19:38 ` [PATCH 04/16] util/dsa: Implement DSA device start and stop logic Hao Xiang
2023-10-25 19:38 ` [PATCH 05/16] util/dsa: Implement DSA task enqueue and dequeue Hao Xiang
2023-10-25 19:38 ` Hao Xiang [this message]
2023-10-25 19:38 ` [PATCH 07/16] util/dsa: Implement zero page checking in DSA task Hao Xiang
2023-10-25 19:38 ` [PATCH 08/16] util/dsa: Implement DSA task asynchronous submission and wait for completion Hao Xiang
2023-10-25 19:38 ` [PATCH 09/16] migration/multifd: Add new migration option for multifd DSA offloading Hao Xiang
2023-10-30 14:41 ` Fabiano Rosas
2023-11-06 21:58 ` [External] " Hao Xiang
2023-10-25 19:38 ` [PATCH 10/16] migration/multifd: Enable DSA offloading in multifd sender path Hao Xiang
2023-10-30 14:37 ` Fabiano Rosas
2023-10-25 19:38 ` [PATCH 11/16] migration/multifd: Add test hook to set normal page ratio Hao Xiang
2023-10-25 19:38 ` [PATCH 12/16] migration/multifd: Enable set normal page ratio test hook in multifd Hao Xiang
2023-10-25 19:38 ` [PATCH 13/16] migration/multifd: Add migration option set packet size Hao Xiang
2023-10-30 15:03 ` Fabiano Rosas
2023-10-25 19:38 ` [PATCH 14/16] migration/multifd: Enable set packet size migration option Hao Xiang
2023-10-25 19:38 ` [PATCH 15/16] util/dsa: Add unit test coverage for Intel DSA task submission and completion Hao Xiang
2023-10-25 19:38 ` [PATCH 16/16] migration/multifd: Add integration tests for multifd with Intel DSA offloading Hao Xiang
2023-10-30 15:26 ` Fabiano Rosas
2023-10-30 15:26 ` [PATCH 00/16] Use Intel DSA accelerator to offload zero page checking in multifd live migration Fabiano Rosas
2023-10-31 1:02 ` [External] " Hao Xiang