* [PATCH v1 0/7] vhost: Add support of kthread API
@ 2024-09-09 1:33 Cindy Lu
2024-09-09 1:33 ` [PATCH v1 1/7] vhost: Add a new module_param for enable kthread Cindy Lu
` (6 more replies)
0 siblings, 7 replies; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
In commit 6e890c5d5021 ("vhost: use vhost_tasks for worker threads"),
vhost dropped support for the kthread API. However, there are still
situations where using a kthread is required.
This series adds kthread support back. Additionally, a module_param is
added to enforce which mode is used, and a new UAPI is introduced to
allow the userspace application to set the mode it wants to use.
Tested the userspace application with QEMU.
Cindy Lu (7):
vhost: Add a new module_param for enable kthread
vhost: Add kthread support in function vhost_worker_queue()
vhost: Add kthread support in function vhost_workers_free()
vhost: Add the vhost_worker to support kthread
vhost: Add the cgroup related function
vhost: Add kthread support in function vhost_worker_create
vhost: Add new UAPI to support change to task mode
drivers/vhost/vhost.c | 246 +++++++++++++++++++++++++++++++++++--
drivers/vhost/vhost.h | 1 +
include/uapi/linux/vhost.h | 2 +
3 files changed, 240 insertions(+), 9 deletions(-)
--
2.45.0
* [PATCH v1 1/7] vhost: Add a new module_param for enable kthread
2024-09-09 1:33 [PATCH v1 0/7] vhost: Add support of kthread API Cindy Lu
@ 2024-09-09 1:33 ` Cindy Lu
2024-09-09 1:33 ` [PATCH v1 2/7] vhost: Add kthread support in function vhost_worker_queue() Cindy Lu
` (5 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Add a new module parameter that enables the kthread mode when the module
is loaded. This parameter selects whether vhost uses a kthread or a
vhost_task. The default value is true.
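Since the parameter is registered with mode 0444, it should appear
read-only under sysfs once the module is loaded. Below is a minimal
userspace sketch (not part of this patch) that reads it; the sysfs path
assumes the parameter is exported by vhost.ko.

/*
 * Illustrative only: read the proposed enforce_kthread parameter.
 * Path assumes the parameter lives in vhost.ko, i.e.
 * /sys/module/vhost/parameters/enforce_kthread (mode 0444).
 */
#include <stdio.h>

int main(void)
{
	char mode = '?';
	FILE *f = fopen("/sys/module/vhost/parameters/enforce_kthread", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, " %c", &mode) != 1)
		mode = '?';
	fclose(f);

	/* Per this patch, 'Y' means kthread mode, 'N' means vhost_task mode. */
	printf("vhost enforce_kthread: %c\n", mode);
	return 0;
}

Being a regular module_param, the value can also be chosen at module load
time as a module option.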
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9ac25d08f473..be43181af659 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -41,6 +41,9 @@ static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
"Maximum number of iotlb entries. (default: 2048)");
+bool enforce_kthread = true;
+module_param(enforce_kthread, bool, 0444);
+MODULE_PARM_DESC(enforce_kthread, "enable vhost to use kthread (default: Y)");
enum {
VHOST_MEMORY_F_LOG = 0x1,
--
2.45.0
* [PATCH v1 2/7] vhost: Add kthread support in function vhost_worker_queue()
2024-09-09 1:33 [PATCH v1 0/7] vhost: Add support of kthread API Cindy Lu
2024-09-09 1:33 ` [PATCH v1 1/7] vhost: Add a new module_param for enable kthread Cindy Lu
@ 2024-09-09 1:33 ` Cindy Lu
2024-09-11 11:13 ` kernel test robot
2024-09-09 1:33 ` [PATCH v1 3/7] vhost: Add kthread support in function vhost_workers_free() Cindy Lu
` (4 subsequent siblings)
6 siblings, 1 reply; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Add back the previously removed kthread queueing path as
vhost_work_queue_kthread() and rename the task-based path to
vhost_worker_queue_task(). The new vhost_worker_queue() selects
between the two based on the value of the module parameter.
The old function vhost_work_queue() was changed to support vhost_task in
commit 6e890c5d5021 ('vhost: use vhost_tasks for worker threads'),
changed again in
commit f9010dbdc ('fork, vhost: Use CLONE_THREAD to fix freezer/ps regression'),
and renamed to vhost_worker_queue() in
commit 0921dddcb5 ('vhost: take worker or vq instead of dev for queueing').
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 30 ++++++++++++++++++++++++++++--
drivers/vhost/vhost.h | 1 +
2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index be43181af659..6198c165ab15 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -236,8 +236,8 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-static void vhost_worker_queue(struct vhost_worker *worker,
- struct vhost_work *work)
+static void vhost_worker_queue_task(struct vhost_worker *worker,
+ struct vhost_work *work)
{
if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
/* We can only add the work to the list after we're
@@ -249,6 +249,32 @@ static void vhost_worker_queue(struct vhost_worker *worker,
}
}
+static void vhost_work_queue_kthread(struct vhost_worker *worker,
+ struct vhost_work *work)
+{
+ if (!worker)
+ return;
+
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+ * sure it was not in the list.
+ * test_and_set_bit() implies a memory barrier.
+ */
+ llist_add(&work->node, &worker->work_list);
+
+ wake_up_process(worker->task);
+ }
+}
+
+static void vhost_worker_queue(struct vhost_worker *worker,
+ struct vhost_work *work)
+{
+ if (use_kthread) {
+ vhost_work_queue_kthread(worker, work);
+ } else {
+ vhost_worker_queue_task(worker, work);
+ }
+}
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{
struct vhost_worker *worker;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index bb75a292d50c..c7f126fd09e8 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -27,6 +27,7 @@ struct vhost_work {
};
struct vhost_worker {
+ struct task_struct *task;
struct vhost_task *vtsk;
struct vhost_dev *dev;
/* Used to serialize device wide flushing with worker swapping. */
--
2.45.0
* [PATCH v1 3/7] vhost: Add kthread support in function vhost_workers_free()
2024-09-09 1:33 [PATCH v1 0/7] vhost: Add support of kthread API Cindy Lu
2024-09-09 1:33 ` [PATCH v1 1/7] vhost: Add a new module_param for enable kthread Cindy Lu
2024-09-09 1:33 ` [PATCH v1 2/7] vhost: Add kthread support in function vhost_worker_queue() Cindy Lu
@ 2024-09-09 1:33 ` Cindy Lu
2024-09-09 1:33 ` [PATCH v1 4/7] vhost: Add the vhost_worker to support kthread Cindy Lu
` (3 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Add back the previously removed kthread cleanup path as
vhost_workers_free_kthread() and rename the task-based path to
vhost_workers_free_task(). The new vhost_workers_free() selects
between the two based on the value of the module parameter.
The old function vhost_workers_free() was changed to support vhost_task in
commit 6e890c5d5021 ('vhost: use vhost_tasks for worker threads'),
also changed in
commit a284f09effe ('vhost: Fix crash during early vhost_transport_send_pkt calls'),
and converted to an xarray in
commit 1cdaafa1b8b ('vhost: replace single worker pointer with xarray').
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 52 ++++++++++++++++++++++++++++++++++++++-----
1 file changed, 47 insertions(+), 5 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 6198c165ab15..411e81a2925a 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -647,8 +647,21 @@ static void vhost_detach_mm(struct vhost_dev *dev)
dev->mm = NULL;
}
-static void vhost_worker_destroy(struct vhost_dev *dev,
- struct vhost_worker *worker)
+static void vhost_worker_destroy_kthread(struct vhost_dev *dev,
+ struct vhost_worker *worker)
+{
+ if (!worker)
+ return;
+
+ WARN_ON(!llist_empty(&worker->work_list));
+
+ xa_erase(&dev->worker_xa, worker->id);
+ kthread_stop(worker->task);
+ kfree(worker);
+}
+
+static void vhost_worker_destroy_task(struct vhost_dev *dev,
+ struct vhost_worker *worker)
{
if (!worker)
return;
@@ -659,7 +672,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
kfree(worker);
}
-static void vhost_workers_free(struct vhost_dev *dev)
+static void vhost_workers_free_task(struct vhost_dev *dev)
{
struct vhost_worker *worker;
unsigned long i;
@@ -674,10 +687,36 @@ static void vhost_workers_free(struct vhost_dev *dev)
* created but couldn't clean up (it forgot or crashed).
*/
xa_for_each(&dev->worker_xa, i, worker)
- vhost_worker_destroy(dev, worker);
+ vhost_worker_destroy_task(dev, worker);
xa_destroy(&dev->worker_xa);
}
+static void vhost_workers_free_kthread(struct vhost_dev *dev)
+{
+ struct vhost_worker *worker;
+ unsigned long i;
+
+ if (!dev->use_worker)
+ return;
+
+ for (i = 0; i < dev->nvqs; i++)
+ rcu_assign_pointer(dev->vqs[i]->worker, NULL);
+ /*
+ * Free the default worker we created and cleanup workers userspace
+ * created but couldn't clean up (it forgot or crashed).
+ */
+ xa_for_each(&dev->worker_xa, i, worker)
+ vhost_worker_destroy_kthread(dev, worker);
+ xa_destroy(&dev->worker_xa);
+}
+
+static void vhost_workers_free(struct vhost_dev *dev)
+{
+ if (use_kthread)
+ vhost_workers_free_kthread(dev);
+ else
+ vhost_workers_free_task(dev);
+}
static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
struct vhost_worker *worker;
@@ -845,7 +884,10 @@ static int vhost_free_worker(struct vhost_dev *dev,
__vhost_worker_flush(worker);
mutex_unlock(&worker->mutex);
- vhost_worker_destroy(dev, worker);
+ if (use_kthread)
+ vhost_worker_destroy_kthread(dev, worker);
+ else
+ vhost_worker_destroy_task(dev, worker);
return 0;
}
--
2.45.0
* [PATCH v1 4/7] vhost: Add the vhost_worker to support kthread
2024-09-09 1:33 [PATCH v1 0/7] vhost: Add support of kthread API Cindy Lu
` (2 preceding siblings ...)
2024-09-09 1:33 ` [PATCH v1 3/7] vhost: Add kthread support in function vhost_workers_free() Cindy Lu
@ 2024-09-09 1:33 ` Cindy Lu
2024-09-09 1:33 ` [PATCH v1 5/7] vhost: Add the cgroup related function Cindy Lu
` (2 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Add back the previously removed vhost_worker() function to support the
kthread mode and rename it to vhost_run_work_kthread_list().
The old function vhost_worker() was changed to support vhost_task in
commit 6e890c5d5021 ('vhost: use vhost_tasks for worker threads')
and converted to an xarray in
commit 1cdaafa1b8b ('vhost: replace single worker pointer with xarray').
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 38 ++++++++++++++++++++++++++++++++++++++
1 file changed, 38 insertions(+)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 411e81a2925a..fd54ee323fb1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -417,6 +417,44 @@ static void vhost_vq_reset(struct vhost_dev *dev,
__vhost_vq_meta_reset(vq);
}
+static int vhost_run_work_kthread_list(void *data)
+{
+ struct vhost_worker *worker = data;
+ struct vhost_work *work, *work_next;
+ struct vhost_dev *dev = worker->dev;
+ struct llist_node *node;
+
+ kthread_use_mm(dev->mm);
+
+ for (;;) {
+ /* mb paired w/ kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
+ node = llist_del_all(&worker->work_list);
+ if (!node)
+ schedule();
+
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
+ __set_current_state(TASK_RUNNING);
+ kcov_remote_start_common(worker->kcov_handle);
+ work->fn(work);
+ kcov_remote_stop();
+ cond_resched();
+ }
+ }
+ kthread_unuse_mm(dev->mm);
+
+ return 0;
+}
+
static bool vhost_run_work_list(void *data)
{
struct vhost_worker *worker = data;
--
2.45.0
* [PATCH v1 5/7] vhost: Add the cgroup related function
2024-09-09 1:33 [PATCH v1 0/7] vhost: Add support of kthread API Cindy Lu
` (3 preceding siblings ...)
2024-09-09 1:33 ` [PATCH v1 4/7] vhost: Add the vhost_worker to support kthread Cindy Lu
@ 2024-09-09 1:33 ` Cindy Lu
2024-09-09 1:33 ` [PATCH v1 6/7] vhost: Add kthread support in function vhost_worker_create Cindy Lu
2024-09-09 1:33 ` [PATCH v1 7/7] vhost: Add new UAPI to support change to task mode Cindy Lu
6 siblings, 0 replies; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Add back the previously removed cgroup functions to support the kthread mode.
The biggest change in this part is in vhost_attach_cgroups() and
vhost_worker_cgroups_kthread(), because of the switch to the
dev->worker_xa xarray.
The old functions were removed in
commit 6e890c5d5021 ('vhost: use vhost_tasks for worker threads').
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 52 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index fd54ee323fb1..f05545b125f0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
+#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
@@ -648,6 +649,57 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_worker_cgroups_kthread(struct vhost_worker *worker)
+{
+ struct vhost_flush_struct flush;
+ struct vhost_attach_cgroups_struct attach;
+
+ attach.owner = current;
+
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_worker_queue(worker, &attach.work);
+
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
+ vhost_worker_queue(worker, &flush.work);
+ wait_for_completion(&flush.wait_event);
+
+ return attach.ret;
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+ struct vhost_worker *worker;
+ unsigned long i;
+ int ret = 0;
+
+ /*
+ * Attach every worker in dev->worker_xa to the owner's cgroups;
+ * stop on the first failure.
+ */
+
+ xa_for_each(&dev->worker_xa, i, worker) {
+ ret = vhost_worker_cgroups_kthread(worker);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
--
2.45.0
* [PATCH v1 6/7] vhost: Add kthread support in function vhost_worker_create
2024-09-09 1:33 [PATCH v1 0/7] vhost: Add support of kthread API Cindy Lu
` (4 preceding siblings ...)
2024-09-09 1:33 ` [PATCH v1 5/7] vhost: Add the cgroup related function Cindy Lu
@ 2024-09-09 1:33 ` Cindy Lu
2024-09-11 12:46 ` kernel test robot
2024-09-09 1:33 ` [PATCH v1 7/7] vhost: Add new UAPI to support change to task mode Cindy Lu
6 siblings, 1 reply; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Split vhost_worker_create() to support both the task and the kthread mode.
Add back the previously removed vhost_worker_create() as
vhost_worker_create_kthread() to support the kthread mode, and rename the
task-based path to vhost_worker_create_task(). The new vhost_worker_create()
selects which one to use based on the value of the module parameter.
The old function vhost_worker_create() was changed to support vhost_task in
commit 6e890c5d5021 ('vhost: use vhost_tasks for worker threads'),
and also changed in
commit 1cdaafa1b8b ('vhost: replace single worker pointer with xarray') and
commit c011bb669dd ('vhost: dynamically allocate vhost_worker').
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 55 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f05545b125f0..bf1e971cb06f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -807,7 +807,8 @@ static void vhost_workers_free(struct vhost_dev *dev)
else
vhost_workers_free_task(dev);
}
-static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+
+static struct vhost_worker *vhost_worker_create_task(struct vhost_dev *dev)
{
struct vhost_worker *worker;
struct vhost_task *vtsk;
@@ -848,6 +849,50 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
return NULL;
}
+static struct vhost_worker *vhost_worker_create_kthread(struct vhost_dev *dev)
+{
+ struct vhost_worker *worker;
+ struct task_struct *task;
+ int ret;
+ u32 id;
+
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
+ if (!worker)
+ return NULL;
+
+ worker->dev = dev;
+ worker->kcov_handle = kcov_common_handle();
+
+ mutex_init(&worker->mutex);
+ init_llist_head(&worker->work_list);
+
+ task = kthread_create(vhost_run_work_kthread_list, worker, "vhost-%d",
+ current->pid);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto free_worker;
+ }
+
+ worker->task = task;
+ wake_up_process(task); /* avoid contributing to loadavg */
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0)
+ goto stop_worker;
+ worker->id = id;
+
+ ret = vhost_attach_cgroups(dev);
+ if (ret)
+ goto stop_worker;
+
+ return worker;
+
+stop_worker:
+ kthread_stop(worker->task);
+free_worker:
+ kfree(worker);
+ return NULL;
+}
+
/* Caller must have device mutex */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
struct vhost_worker *worker)
@@ -936,6 +981,14 @@ static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
return 0;
}
+static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+{
+ if (use_kthread)
+ return vhost_worker_create_kthread(dev);
+ else
+ return vhost_worker_create_task(dev);
+}
+
/* Caller must have device mutex */
static int vhost_new_worker(struct vhost_dev *dev,
struct vhost_worker_state *info)
--
2.45.0
* [PATCH v1 7/7] vhost: Add new UAPI to support change to task mode
2024-09-09 1:33 [PATCH v1 0/7] vhost: Add support of kthread API Cindy Lu
` (5 preceding siblings ...)
2024-09-09 1:33 ` [PATCH v1 6/7] vhost: Add kthread support in function vhost_worker_create Cindy Lu
@ 2024-09-09 1:33 ` Cindy Lu
2024-09-11 14:10 ` kernel test robot
6 siblings, 1 reply; 11+ messages in thread
From: Cindy Lu @ 2024-09-09 1:33 UTC (permalink / raw)
To: lulu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Add a new UAPI to support setting the vhost device to task mode. The
userspace application needs to use VHOST_SET_ENFORCE_TASK to select the
mode, and this must be done before VHOST_SET_OWNER is issued.
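For illustration only (not part of this patch), a minimal userspace
sketch of the intended call order follows. The device node, the chosen
value and the fallback ioctl definition are assumptions;
VHOST_SET_ENFORCE_TASK is only introduced by this series.

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

#ifndef VHOST_SET_ENFORCE_TASK
/* Mirrors the uapi addition in this patch; only needed with older headers. */
#define VHOST_SET_ENFORCE_TASK _IOW(VHOST_VIRTIO, 0x83, bool)
#endif

int main(void)
{
	bool enforce_task = false;	/* illustrative value */
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Must happen before VHOST_SET_OWNER; afterwards the kernel returns -EBUSY. */
	if (ioctl(fd, VHOST_SET_ENFORCE_TASK, &enforce_task) < 0)
		perror("VHOST_SET_ENFORCE_TASK");

	if (ioctl(fd, VHOST_SET_OWNER) < 0)
		perror("VHOST_SET_OWNER");

	close(fd);
	return 0;
}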
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 16 +++++++++++++++-
include/uapi/linux/vhost.h | 2 ++
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index bf1e971cb06f..8ed6b3a947a9 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2340,14 +2340,28 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
struct eventfd_ctx *ctx;
u64 p;
- long r;
+ long r = 0;
int i, fd;
+ bool enforce_task;
/* If you are not the owner, you can become one */
if (ioctl == VHOST_SET_OWNER) {
r = vhost_dev_set_owner(d);
goto done;
}
+ if (ioctl == VHOST_SET_ENFORCE_TASK) {
+ /* Is there an owner already? */
+ if (vhost_dev_has_owner(d)) {
+ r = -EBUSY;
+ goto done;
+ }
+ if (copy_from_user(&kthread, argp, sizeof(enforce_task))) {
+ r = -EFAULT;
+ goto done;
+ }
+ use_kthread = enforce_task;
+ goto done;
+ }
/* You must be the owner to do anything else */
r = vhost_dev_check_owner(d);
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index b95dd84eef2d..9853d62d2d34 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -235,4 +235,6 @@
*/
#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \
struct vhost_vring_state)
+
+#define VHOST_SET_ENFORCE_TASK _IOW(VHOST_VIRTIO, 0x83, bool)
#endif
--
2.45.0
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH v1 2/7] vhost: Add kthread support in function vhost_worker_queue()
2024-09-09 1:33 ` [PATCH v1 2/7] vhost: Add kthread support in function vhost_worker_queue() Cindy Lu
@ 2024-09-11 11:13 ` kernel test robot
0 siblings, 0 replies; 11+ messages in thread
From: kernel test robot @ 2024-09-11 11:13 UTC (permalink / raw)
To: Cindy Lu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Cc: oe-kbuild-all
Hi Cindy,
kernel test robot noticed the following build errors:
[auto build test ERROR on mst-vhost/linux-next]
[also build test ERROR on linus/master v6.11-rc7 next-20240910]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Cindy-Lu/vhost-Add-a-new-module_param-for-enable-kthread/20240909-093852
base: https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link: https://lore.kernel.org/r/20240909013531.1243525-3-lulu%40redhat.com
patch subject: [PATCH v1 2/7] vhost: Add kthread support in function vhost_worker_queue()
config: arc-randconfig-001-20240911 (https://download.01.org/0day-ci/archive/20240911/202409111842.o3eEppU6-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240911/202409111842.o3eEppU6-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202409111842.o3eEppU6-lkp@intel.com/
All errors (new ones prefixed by >>):
drivers/vhost/vhost.c: In function 'vhost_worker_queue':
>> drivers/vhost/vhost.c:272:13: error: 'use_kthread' undeclared (first use in this function)
272 | if (use_kthread) {
| ^~~~~~~~~~~
drivers/vhost/vhost.c:272:13: note: each undeclared identifier is reported only once for each function it appears in
vim +/use_kthread +272 drivers/vhost/vhost.c
268
269 static void vhost_worker_queue(struct vhost_worker *worker,
270 struct vhost_work *work)
271 {
> 272 if (use_kthread) {
273 vhost_work_queue_kthread(worker, work);
274 } else {
275 vhost_worker_queue_task(worker, work);
276 }
277 }
278 bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
279 {
280 struct vhost_worker *worker;
281 bool queued = false;
282
283 rcu_read_lock();
284 worker = rcu_dereference(vq->worker);
285 if (worker) {
286 queued = true;
287 vhost_worker_queue(worker, work);
288 }
289 rcu_read_unlock();
290
291 return queued;
292 }
293 EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
294
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH v1 6/7] vhost: Add kthread support in function vhost_worker_create
2024-09-09 1:33 ` [PATCH v1 6/7] vhost: Add kthread support in function vhost_worker_create Cindy Lu
@ 2024-09-11 12:46 ` kernel test robot
0 siblings, 0 replies; 11+ messages in thread
From: kernel test robot @ 2024-09-11 12:46 UTC (permalink / raw)
To: Cindy Lu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Cc: oe-kbuild-all
Hi Cindy,
kernel test robot noticed the following build warnings:
[auto build test WARNING on mst-vhost/linux-next]
[also build test WARNING on linus/master v6.11-rc7 next-20240910]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Cindy-Lu/vhost-Add-a-new-module_param-for-enable-kthread/20240909-093852
base: https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link: https://lore.kernel.org/r/20240909013531.1243525-7-lulu%40redhat.com
patch subject: [PATCH v1 6/7] vhost: Add kthread support in function vhost_worker_create
config: arc-randconfig-001-20240911 (https://download.01.org/0day-ci/archive/20240911/202409112050.3zbvpbyT-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240911/202409112050.3zbvpbyT-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202409112050.3zbvpbyT-lkp@intel.com/
All warnings (new ones prefixed by >>):
drivers/vhost/vhost.c: In function 'vhost_worker_queue':
drivers/vhost/vhost.c:273:13: error: 'use_kthread' undeclared (first use in this function)
273 | if (use_kthread) {
| ^~~~~~~~~~~
drivers/vhost/vhost.c:273:13: note: each undeclared identifier is reported only once for each function it appears in
drivers/vhost/vhost.c: In function 'vhost_workers_free':
drivers/vhost/vhost.c:805:13: error: 'use_kthread' undeclared (first use in this function)
805 | if (use_kthread)
| ^~~~~~~~~~~
drivers/vhost/vhost.c: In function 'vhost_worker_create':
drivers/vhost/vhost.c:986:13: error: 'use_kthread' undeclared (first use in this function)
986 | if (use_kthread)
| ^~~~~~~~~~~
drivers/vhost/vhost.c: In function 'vhost_free_worker':
drivers/vhost/vhost.c:1030:13: error: 'use_kthread' undeclared (first use in this function)
1030 | if (use_kthread)
| ^~~~~~~~~~~
drivers/vhost/vhost.c: In function 'vhost_worker_create':
>> drivers/vhost/vhost.c:990:1: warning: control reaches end of non-void function [-Wreturn-type]
990 | }
| ^
vim +990 drivers/vhost/vhost.c
983
984 static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
985 {
986 if (use_kthread)
987 return vhost_worker_create_kthread(dev);
988 else
989 return vhost_worker_create_task(dev);
> 990 }
991
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH v1 7/7] vhost: Add new UAPI to support change to task mode
2024-09-09 1:33 ` [PATCH v1 7/7] vhost: Add new UAPI to support change to task mode Cindy Lu
@ 2024-09-11 14:10 ` kernel test robot
0 siblings, 0 replies; 11+ messages in thread
From: kernel test robot @ 2024-09-11 14:10 UTC (permalink / raw)
To: Cindy Lu, jasowang, mst, michael.christie, linux-kernel,
virtualization
Cc: oe-kbuild-all
Hi Cindy,
kernel test robot noticed the following build errors:
[auto build test ERROR on mst-vhost/linux-next]
[also build test ERROR on linus/master v6.11-rc7 next-20240911]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Cindy-Lu/vhost-Add-a-new-module_param-for-enable-kthread/20240909-093852
base: https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
patch link: https://lore.kernel.org/r/20240909013531.1243525-8-lulu%40redhat.com
patch subject: [PATCH v1 7/7] vhost: Add new UAPI to support change to task mode
config: arc-randconfig-001-20240911 (https://download.01.org/0day-ci/archive/20240911/202409112119.BJdqPVTC-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240911/202409112119.BJdqPVTC-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202409112119.BJdqPVTC-lkp@intel.com/
All errors (new ones prefixed by >>):
drivers/vhost/vhost.c: In function 'vhost_worker_queue':
drivers/vhost/vhost.c:273:13: error: 'use_kthread' undeclared (first use in this function)
273 | if (use_kthread) {
| ^~~~~~~~~~~
drivers/vhost/vhost.c:273:13: note: each undeclared identifier is reported only once for each function it appears in
drivers/vhost/vhost.c: In function 'vhost_workers_free':
drivers/vhost/vhost.c:805:13: error: 'use_kthread' undeclared (first use in this function)
805 | if (use_kthread)
| ^~~~~~~~~~~
drivers/vhost/vhost.c: In function 'vhost_worker_create':
drivers/vhost/vhost.c:986:13: error: 'use_kthread' undeclared (first use in this function)
986 | if (use_kthread)
| ^~~~~~~~~~~
drivers/vhost/vhost.c: In function 'vhost_free_worker':
drivers/vhost/vhost.c:1030:13: error: 'use_kthread' undeclared (first use in this function)
1030 | if (use_kthread)
| ^~~~~~~~~~~
drivers/vhost/vhost.c: In function 'vhost_dev_ioctl':
>> drivers/vhost/vhost.c:2358:37: error: 'kthread' undeclared (first use in this function); did you mean 'kthreadd'?
2358 | if (copy_from_user(&kthread, argp, sizeof(enforce_task))) {
| ^~~~~~~
| kthreadd
drivers/vhost/vhost.c:2362:17: error: 'use_kthread' undeclared (first use in this function)
2362 | use_kthread = enforce_task;
| ^~~~~~~~~~~
drivers/vhost/vhost.c: In function 'vhost_worker_create':
drivers/vhost/vhost.c:990:1: warning: control reaches end of non-void function [-Wreturn-type]
990 | }
| ^
vim +2358 drivers/vhost/vhost.c
2337
2338 /* Caller must have device mutex */
2339 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
2340 {
2341 struct eventfd_ctx *ctx;
2342 u64 p;
2343 long r = 0;
2344 int i, fd;
2345 bool enforce_task;
2346
2347 /* If you are not the owner, you can become one */
2348 if (ioctl == VHOST_SET_OWNER) {
2349 r = vhost_dev_set_owner(d);
2350 goto done;
2351 }
2352 if (ioctl == VHOST_SET_ENFORCE_TASK) {
2353 /* Is there an owner already? */
2354 if (vhost_dev_has_owner(d)) {
2355 r = -EBUSY;
2356 goto done;
2357 }
> 2358 if (copy_from_user(&kthread, argp, sizeof(enforce_task))) {
2359 r = -EFAULT;
2360 goto done;
2361 }
2362 use_kthread = enforce_task;
2363 goto done;
2364 }
2365
2366 /* You must be the owner to do anything else */
2367 r = vhost_dev_check_owner(d);
2368 if (r)
2369 goto done;
2370
2371 switch (ioctl) {
2372 case VHOST_SET_MEM_TABLE:
2373 r = vhost_set_memory(d, argp);
2374 break;
2375 case VHOST_SET_LOG_BASE:
2376 if (copy_from_user(&p, argp, sizeof p)) {
2377 r = -EFAULT;
2378 break;
2379 }
2380 if ((u64)(unsigned long)p != p) {
2381 r = -EFAULT;
2382 break;
2383 }
2384 for (i = 0; i < d->nvqs; ++i) {
2385 struct vhost_virtqueue *vq;
2386 void __user *base = (void __user *)(unsigned long)p;
2387 vq = d->vqs[i];
2388 mutex_lock(&vq->mutex);
2389 /* If ring is inactive, will check when it's enabled. */
2390 if (vq->private_data && !vq_log_access_ok(vq, base))
2391 r = -EFAULT;
2392 else
2393 vq->log_base = base;
2394 mutex_unlock(&vq->mutex);
2395 }
2396 break;
2397 case VHOST_SET_LOG_FD:
2398 r = get_user(fd, (int __user *)argp);
2399 if (r < 0)
2400 break;
2401 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
2402 if (IS_ERR(ctx)) {
2403 r = PTR_ERR(ctx);
2404 break;
2405 }
2406 swap(ctx, d->log_ctx);
2407 for (i = 0; i < d->nvqs; ++i) {
2408 mutex_lock(&d->vqs[i]->mutex);
2409 d->vqs[i]->log_ctx = d->log_ctx;
2410 mutex_unlock(&d->vqs[i]->mutex);
2411 }
2412 if (ctx)
2413 eventfd_ctx_put(ctx);
2414 break;
2415 default:
2416 r = -ENOIOCTLCMD;
2417 break;
2418 }
2419 done:
2420 return r;
2421 }
2422 EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
2423
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki