* [PATCH 1/5] nvmet-tcp: have queue io_work context run on sock incoming cpu
From: Wunderlich, Mark @ 2020-08-28 1:00 UTC
To: linux-nvme@lists.infradead.org; +Cc: Sagi Grimberg
nvmet-tcp: have queue io_work context run on sock incoming cpu
There is no good reason to spread queues across CPUs artificially. The
target usually serves multiple hosts, so running each queue's io_work on
the socket's incoming cpu gives better cache affinity than round-robining
queues across all online cpus.
We rely on RSS to spread the work around sufficiently.
Signed-off-by: Mark Wunderlich <mark.wunderlich@intel.com>
---
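For context: sk->sk_incoming_cpu records the CPU that last handled receive
processing for the socket, and user space can read the same hint through the
SO_INCOMING_CPU socket option. A minimal user-space sketch of that pattern
(hypothetical helper, not part of this patch):

#include <stdio.h>
#include <sys/socket.h>

/*
 * Hypothetical sketch: fetch the CPU that last processed receive
 * traffic for a connected socket. SO_INCOMING_CPU is the user-space
 * view of the sk->sk_incoming_cpu field used by queue_cpu() below.
 */
static int incoming_cpu(int connfd)
{
	int cpu;
	socklen_t len = sizeof(cpu);

	if (getsockopt(connfd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) < 0) {
		perror("getsockopt(SO_INCOMING_CPU)");
		return -1;
	}
	return cpu;	/* steer this connection's work to the returned CPU */
}

With RSS steering each flow to a consistent receive CPU, scheduling a
connection's work there keeps socket state and payload in the same cache
domain.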
drivers/nvme/target/tcp.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 9eda91162fe4..911db3f170df 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -94,7 +94,6 @@ struct nvmet_tcp_queue {
struct socket *sock;
struct nvmet_tcp_port *port;
struct work_struct io_work;
- int cpu;
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
@@ -144,7 +143,6 @@ struct nvmet_tcp_port {
struct work_struct accept_work;
struct nvmet_port *nport;
struct sockaddr_storage addr;
- int last_cpu;
void (*data_ready)(struct sock *);
};
@@ -214,6 +212,11 @@ static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
list_add_tail(&cmd->entry, &cmd->queue->free_list);
}
+static inline int queue_cpu(struct nvmet_tcp_queue *queue)
+{
+ return queue->sock->sk->sk_incoming_cpu;
+}
+
static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
@@ -501,7 +504,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_queue *queue = cmd->queue;
llist_add(&cmd->lentry, &queue->resp_list);
- queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -1215,7 +1218,7 @@ static void nvmet_tcp_io_work(struct work_struct *w)
* We exhausted our budget, requeue ourselves
*/
if (pending)
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
@@ -1375,7 +1378,7 @@ static void nvmet_tcp_data_ready(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue))
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1395,7 +1398,7 @@ static void nvmet_tcp_write_space(struct sock *sk)
if (sk_stream_is_writeable(sk)) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
out:
read_unlock_bh(&sk->sk_callback_lock);
@@ -1504,9 +1507,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_free_connect;
- port->last_cpu = cpumask_next_wrap(port->last_cpu,
- cpu_online_mask, -1, false);
- queue->cpu = port->last_cpu;
nvmet_prepare_receive_pdu(queue);
mutex_lock(&nvmet_tcp_queue_mutex);
@@ -1517,7 +1517,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_destroy_sq;
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
return 0;
out_destroy_sq:
@@ -1604,7 +1604,6 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
}
port->nport = nport;
- port->last_cpu = -1;
INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
if (port->nport->inline_data_size < 0)
port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
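The mechanism the patch leans on is queue_work_on(), which queues a work
item on a specific CPU's worker pool rather than on an arbitrary one. A
self-contained sketch of the same pattern (hypothetical demo module, not
nvmet code; assumes only the standard workqueue API):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

/* Runs on the CPU the work item was queued on. */
static void demo_fn(struct work_struct *work)
{
	pr_info("demo work running on cpu %d\n", raw_smp_processor_id());
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_work, demo_fn);
	/* Bind the work to CPU 0, as io_work is bound to queue_cpu(). */
	queue_work_on(0, demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains pending work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");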
* Re: [PATCH 1/5] nvmet-tcp: have queue io_work context run on sock incoming cpu
From: Sagi Grimberg @ 2020-09-04 0:53 UTC
To: Wunderlich, Mark, linux-nvme@lists.infradead.org
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
* Re: [PATCH 1/5] nvmet-tcp: have queue io_work context run on sock incoming cpu
From: Christoph Hellwig @ 2020-09-17 8:54 UTC
To: Wunderlich, Mark; +Cc: Sagi Grimberg, linux-nvme@lists.infradead.org
Applied to nvme-5.10 after fixing the duplicate subject line,
and fixing the author line to match the name order in the signoff.