From: Stefano Stabellini <sstabellini@kernel.org>
To: xen-devel@lists.xen.org
Cc: linux-kernel@vger.kernel.org, sstabellini@kernel.org,
jgross@suse.com, boris.ostrovsky@oracle.com,
Stefano Stabellini <stefano@aporeto.com>
Subject: [PATCH v3 08/18] xen/pvcalls: implement connect command
Date: Fri, 2 Jun 2017 12:31:45 -0700
Message-ID: <1496431915-20774-8-git-send-email-sstabellini@kernel.org>
In-Reply-To: <1496431915-20774-1-git-send-email-sstabellini@kernel.org>
Allocate a socket. Keep track of socket <-> ring mappings with a new data
structure, called sock_mapping. Implement the connect command by calling
inet_stream_connect and by mapping the new indexes page and data ring.
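The mapping happens in two steps: first the indexes page, using the single
grant ref passed in the request, then the data ring itself, whose grant
refs and order are read from the indexes page. Condensed from
pvcalls_new_active_socket() below (error handling omitted), the sequence
is:

	map->ring_order = map->ring->ring_order;
	/* read the order before dereferencing the data ring refs */
	virt_rmb();
	xenbus_map_ring_valloc(priv->dev, map->ring->ref,
			       1 << map->ring_order, &page);
	map->data.in = page;
	map->data.out = page + XEN_FLEX_RING_SIZE(map->ring_order);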
Allocate a workqueue and a work_struct, called ioworker, to perform
reads and writes to the socket.
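Each active socket gets its own unbound workqueue, assigned to a randomly
chosen online CPU. Reads and writes will later be driven by queueing the
work item there, along the lines of the following sketch (the helper is
hypothetical, for illustration only; the real wakeup path is added by a
later patch in this series):

	static void pvcalls_kick_ioworker(struct sock_mapping *map)
	{
		queue_work_on(map->ioworker.cpu, map->ioworker.wq,
			      &map->ioworker.register_work);
	}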
When an active socket is closed (sk_state_change), set in_error to
-ENOTCONN and notify the other end, as specified by the protocol.
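Errors thus travel in-band: the backend writes in_error to the indexes
page and kicks the event channel. A frontend would typically check
in_error before consuming ring data, along these lines (hypothetical
frontend-side check, not part of this patch):

	error = intf->in_error;
	/* read in_error before any ring payload */
	virt_rmb();
	if (error < 0)
		return error;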
sk_data_ready and pvcalls_back_ioworker will be implemented later.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
---
drivers/xen/pvcalls-back.c | 170 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 170 insertions(+)
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 1f2bb26..3eb84ef 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -56,6 +56,40 @@ struct pvcalls_fedata {
struct work_struct register_work;
};
+struct pvcalls_ioworker {
+ struct work_struct register_work;
+ struct workqueue_struct *wq;
+ unsigned int cpu;
+};
+
+struct sock_mapping {
+ struct list_head list;
+ struct pvcalls_fedata *priv;
+ struct socket *sock;
+ uint64_t id;
+ grant_ref_t ref;
+ struct pvcalls_data_intf *ring;
+ void *bytes;
+ struct pvcalls_data data;
+ uint32_t ring_order;
+ int irq;
+ atomic_t read;
+ atomic_t write;
+ atomic_t io;
+ atomic_t release;
+ void (*saved_data_ready)(struct sock *sk);
+ struct pvcalls_ioworker ioworker;
+};
+
+static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
+static int pvcalls_back_release_active(struct xenbus_device *dev,
+ struct pvcalls_fedata *priv,
+ struct sock_mapping *map);
+
+static void pvcalls_back_ioworker(struct work_struct *work)
+{
+}
+
static int pvcalls_back_socket(struct xenbus_device *dev,
struct xen_pvcalls_request *req)
{
@@ -84,9 +118,140 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
return ret;
}
+static void pvcalls_sk_state_change(struct sock *sock)
+{
+ struct sock_mapping *map = sock->sk_user_data;
+ struct pvcalls_data_intf *intf;
+
+ if (map == NULL)
+ return;
+
+ intf = map->ring;
+ intf->in_error = -ENOTCONN;
+ notify_remote_via_irq(map->irq);
+}
+
+static void pvcalls_sk_data_ready(struct sock *sock)
+{
+}
+
+static struct sock_mapping *pvcalls_new_active_socket(
+ struct pvcalls_fedata *priv,
+ uint64_t id,
+ grant_ref_t ref,
+ uint32_t evtchn,
+ struct socket *sock)
+{
+ int ret;
+ struct sock_mapping *map = NULL;
+ void *page;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL)
+ return NULL;
+
+ map->priv = priv;
+ map->sock = sock;
+ map->id = id;
+ map->ref = ref;
+
+ ret = xenbus_map_ring_valloc(priv->dev, &ref, 1, &page);
+ if (ret < 0)
+ goto out;
+ map->ring = page;
+ map->ring_order = map->ring->ring_order;
+ /* first read the order, then map the data ring */
+ virt_rmb();
+ if (map->ring_order > MAX_RING_ORDER)
+ goto out;
+ ret = xenbus_map_ring_valloc(priv->dev, map->ring->ref,
+ (1 << map->ring_order), &page);
+ if (ret < 0)
+ goto out;
+ map->bytes = page;
+
+ ret = bind_interdomain_evtchn_to_irqhandler(priv->dev->otherend_id,
+ evtchn,
+ pvcalls_back_conn_event,
+ 0,
+ "pvcalls-backend",
+ map);
+ if (ret < 0)
+ goto out;
+ map->irq = ret;
+
+ map->data.in = map->bytes;
+ map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);
+
+ map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
+ if (!map->ioworker.wq)
+ goto out;
+ map->ioworker.cpu = get_random_int() % num_online_cpus();
+ atomic_set(&map->io, 1);
+ INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);
+
+ down(&priv->socket_lock);
+ list_add_tail(&map->list, &priv->socket_mappings);
+ up(&priv->socket_lock);
+
+ write_lock_bh(&map->sock->sk->sk_callback_lock);
+ map->saved_data_ready = map->sock->sk->sk_data_ready;
+ map->sock->sk->sk_user_data = map;
+ map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
+ map->sock->sk->sk_state_change = pvcalls_sk_state_change;
+ write_unlock_bh(&map->sock->sk->sk_callback_lock);
+
+ return map;
+out:
+ pvcalls_back_release_active(priv->dev, priv, map);
+ return NULL;
+}
+
static int pvcalls_back_connect(struct xenbus_device *dev,
struct xen_pvcalls_request *req)
{
+ struct pvcalls_fedata *priv;
+ int ret;
+ struct socket *sock;
+ struct sock_mapping *map = NULL;
+ struct xen_pvcalls_response *rsp;
+
+ priv = dev_get_drvdata(&dev->dev);
+
+ ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
+ if (ret < 0)
+ goto out;
+ ret = inet_stream_connect(sock, (struct sockaddr *)&req->u.connect.addr,
+ req->u.connect.len, req->u.connect.flags);
+ if (ret < 0) {
+ sock_release(sock);
+ goto out;
+ }
+
+ map = pvcalls_new_active_socket(priv,
+ req->u.connect.id,
+ req->u.connect.ref,
+ req->u.connect.evtchn,
+ sock);
+ if (!map) {
+ ret = -EFAULT;
+ sock_release(sock);
+ }
+
+out:
+ rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++);
+ rsp->req_id = req->req_id;
+ rsp->cmd = req->cmd;
+ rsp->u.connect.id = req->u.connect.id;
+ rsp->ret = ret;
+
+ return ret;
+}
+
+static int pvcalls_back_release_active(struct xenbus_device *dev,
+ struct pvcalls_fedata *priv,
+ struct sock_mapping *map)
+{
return 0;
}
@@ -206,6 +371,11 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
+{
+ return IRQ_HANDLED;
+}
+
static int backend_connect(struct xenbus_device *dev)
{
int err, evtchn;
--
1.9.1