From: Jonathan Lemon <jonathan.lemon@gmail.com>
To: <netdev@vger.kernel.org>
Cc: <kernel-team@fb.com>, <axboe@kernel.dk>
Subject: [RFC PATCH 08/21] misc: add shqueue.h for prototyping
Date: Thu, 18 Jun 2020 09:09:28 -0700 [thread overview]
Message-ID: <20200618160941.879717-9-jonathan.lemon@gmail.com> (raw)
In-Reply-To: <20200618160941.879717-1-jonathan.lemon@gmail.com>
Shared queues between user and kernel use their own private structures
for accessing a shared data area, but they need to use the same queue
functions.
Rather than doing the 'right' thing and duplicating the file for
each domain, temporarily cheat for prototyping and use a single shared
file.
Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
---
include/uapi/misc/shqueue.h | 205 ++++++++++++++++++++++++++++++++++++
1 file changed, 205 insertions(+)
create mode 100644 include/uapi/misc/shqueue.h
diff --git a/include/uapi/misc/shqueue.h b/include/uapi/misc/shqueue.h
new file mode 100644
index 000000000000..258b9db35dbd
--- /dev/null
+++ b/include/uapi/misc/shqueue.h
@@ -0,0 +1,205 @@
+#pragma once
+
+/* XXX
+ * This is not a user api, but placed here for prototyping, in order to
+ * avoid two nearly identical copies for user and kernel space.
+ */
+
+/*
+ * Kernel-only view of the memory area shared between user and kernel.
+ * prod and cons each sit on their own cache line so the producer and
+ * consumer sides do not false-share; the element storage follows.
+ */
+struct shared_queue_map {
+ unsigned prod ____cacheline_aligned_in_smp; /* shared producer index */
+ unsigned cons ____cacheline_aligned_in_smp; /* shared consumer index */
+ char data[] ____cacheline_aligned_in_smp; /* ring element storage */
+};
+
+/* user and kernel private copy - identical in order to share sq* fcns */
+struct shared_queue {
+ unsigned *prod; /* -> shared producer index in the mapped area */
+ unsigned *cons; /* -> shared consumer index in the mapped area */
+ char *data; /* -> start of ring element storage */
+ unsigned elt_sz; /* size of one element, in bytes */
+ unsigned mask; /* index wrap mask (presumably entries - 1, power-of-2 ring) */
+ unsigned cached_prod; /* private copy of producer index (free-running) */
+ unsigned cached_cons; /* private copy of consumer index (free-running) */
+ unsigned entries; /* ring capacity in elements */
+
+ unsigned map_sz; /* size of the shared mapping — bookkeeping only here */
+ void *map_ptr; /* base of the shared mapping — bookkeeping only here */
+};
+
+/*
+ * See documentation in tools/include/linux/ring_buffer.h.
+ * Using explicit smp_* barriers with *_ONCE accesses is an optimization
+ * over smp_{store_release|load_acquire}.
+ */
+
+/*
+ * Producer side: re-read the shared consumer index into the private
+ * cache, to discover slots the consumer has released.
+ */
+static inline void __sq_load_acquire_cons(struct shared_queue *q)
+{
+ /* Refresh the local copy of the consumer (tail) index */
+ q->cached_cons = READ_ONCE(*q->cons);
+ /* A, matches D */
+}
+
+/*
+ * Consumer side: publish the private consumer index.  The full barrier
+ * orders all preceding accesses to ring entries before the index store,
+ * so the producer cannot reuse a slot the consumer is still reading.
+ */
+static inline void __sq_store_release_cons(struct shared_queue *q)
+{
+ smp_mb(); /* D, matches A */
+ WRITE_ONCE(*q->cons, q->cached_cons);
+}
+
+/*
+ * Consumer side: re-read the shared producer index.  The read barrier
+ * orders the index load before any subsequent reads of ring entries.
+ */
+static inline void __sq_load_acquire_prod(struct shared_queue *q)
+{
+ /* Refresh the local copy of the producer (head) index */
+ q->cached_prod = READ_ONCE(*q->prod);
+ smp_rmb(); /* C, matches B */
+}
+
+/*
+ * Producer side: publish the private producer index.  The write barrier
+ * orders all preceding stores (the queued entries) before the index
+ * store, so the consumer never sees an index ahead of the data.
+ */
+static inline void __sq_store_release_prod(struct shared_queue *q)
+{
+ smp_wmb(); /* B, matches C */
+ WRITE_ONCE(*q->prod, q->cached_prod);
+}
+
+/*
+ * Consumer: release everything consumed so far, then pick up any new
+ * entries published by the producer.
+ */
+static inline void sq_cons_refresh(struct shared_queue *q)
+{
+ __sq_store_release_cons(q);
+ __sq_load_acquire_prod(q);
+}
+
+/*
+ * True when the ring holds no entries.  Reads both shared indices
+ * directly, bypassing the cached copies.
+ */
+static inline bool sq_empty(struct shared_queue *q)
+{
+ return READ_ONCE(*q->prod) == READ_ONCE(*q->cons);
+}
+
+/*
+ * True when no entries are visible in the consumer's cached view.
+ * Does not refresh from the shared indices.
+ */
+static inline bool sq_cons_empty(struct shared_queue *q)
+{
+ return q->cached_prod == q->cached_cons;
+}
+
+/*
+ * Entries available to consume per the cached indices.  Unsigned
+ * subtraction gives the correct count across index wraparound.
+ */
+static inline unsigned __sq_cons_ready(struct shared_queue *q)
+{
+ return q->cached_prod - q->cached_cons;
+}
+
+/*
+ * Entries available to consume, refreshing the cached producer index
+ * once if the cached view looks empty.
+ * NOTE(review): open-codes __sq_cons_ready()/sq_cons_empty(); could
+ * reuse them.
+ */
+static inline unsigned sq_cons_ready(struct shared_queue *q)
+{
+ if (q->cached_prod == q->cached_cons)
+ __sq_load_acquire_prod(q);
+
+ return q->cached_prod - q->cached_cons;
+}
+
+/*
+ * True when at least @count entries are ready to consume, refreshing
+ * the cached producer index at most once before giving up.
+ */
+static inline bool sq_cons_avail(struct shared_queue *q, unsigned count)
+{
+ if (count <= __sq_cons_ready(q))
+ return true;
+ __sq_load_acquire_prod(q);
+ return count <= __sq_cons_ready(q);
+}
+
+/*
+ * Address of element @idx; the mask wraps the free-running index into
+ * the ring.
+ */
+static inline void *sq_get_ptr(struct shared_queue *q, unsigned idx)
+{
+ return q->data + (idx & q->mask) * q->elt_sz;
+}
+
+/*
+ * Publish the cached consumer index, returning all locally consumed
+ * slots to the producer.
+ */
+static inline void sq_cons_complete(struct shared_queue *q)
+{
+ __sq_store_release_cons(q);
+}
+
+/*
+ * Return a pointer to the next entry without consuming it, or NULL if
+ * the ring is still empty after one refresh.  Caller presumably follows
+ * up with sq_cons_advance() and, eventually, sq_cons_complete().
+ */
+static inline void *sq_cons_peek(struct shared_queue *q)
+{
+ if (sq_cons_empty(q)) {
+ sq_cons_refresh(q);
+ if (sq_cons_empty(q))
+ return NULL;
+ }
+ return sq_get_ptr(q, q->cached_cons);
+}
+
+/*
+ * Fill @ptr with up to @count element pointers and advance the cached
+ * consumer index, WITHOUT publishing it: unlike sq_cons_batch(), the
+ * slots stay owned by the consumer until sq_cons_complete() is called.
+ * Returns the number of pointers stored (0 if the ring is empty).
+ */
+static inline unsigned
+sq_peek_batch(struct shared_queue *q, void **ptr, unsigned count)
+{
+ unsigned i, idx, ready;
+
+ ready = sq_cons_ready(q);
+ if (!ready)
+ return 0;
+
+ /* clamp to what is actually available */
+ count = count > ready ? ready : count;
+
+ idx = q->cached_cons;
+ for (i = 0; i < count; i++)
+ ptr[i] = sq_get_ptr(q, idx++);
+
+ q->cached_cons += count;
+
+ return count;
+}
+
+/*
+ * Consume up to @count entries: fill @ptr with element pointers,
+ * advance the cached consumer index, and publish it so the producer can
+ * reuse the slots immediately.  Returns the number of pointers stored
+ * (0 if the ring is empty).
+ */
+static inline unsigned
+sq_cons_batch(struct shared_queue *q, void **ptr, unsigned count)
+{
+ unsigned i, idx, ready;
+
+ ready = sq_cons_ready(q);
+ if (!ready)
+ return 0;
+
+ /* clamp to what is actually available */
+ count = count > ready ? ready : count;
+
+ idx = q->cached_cons;
+ for (i = 0; i < count; i++)
+ ptr[i] = sq_get_ptr(q, idx++);
+
+ q->cached_cons += count;
+ sq_cons_complete(q);
+
+ return count;
+}
+
+/*
+ * Consume one previously peeked entry locally; not visible to the
+ * producer until sq_cons_complete().
+ */
+static inline void sq_cons_advance(struct shared_queue *q)
+{
+ q->cached_cons++;
+}
+
+/*
+ * Free slots per the cached indices: capacity minus in-flight entries.
+ */
+static inline unsigned __sq_prod_space(struct shared_queue *q)
+{
+ return q->entries - (q->cached_prod - q->cached_cons);
+}
+
+/*
+ * Free slots available to the producer, refreshing the cached consumer
+ * index once if the cached view looks full.
+ */
+static inline unsigned sq_prod_space(struct shared_queue *q)
+{
+ unsigned space;
+
+ space = __sq_prod_space(q);
+ if (!space) {
+ __sq_load_acquire_cons(q);
+ space = __sq_prod_space(q);
+ }
+ return space;
+}
+
+/*
+ * True when at least @count free slots are available, refreshing the
+ * cached consumer index at most once before giving up.
+ */
+static inline bool sq_prod_avail(struct shared_queue *q, unsigned count)
+{
+ if (count <= __sq_prod_space(q))
+ return true;
+ __sq_load_acquire_cons(q);
+ return count <= __sq_prod_space(q);
+}
+
+/*
+ * Claim the next slot and bump the cached producer index.  No space
+ * check: caller must have verified room via sq_prod_avail() or use
+ * sq_prod_reserve() instead.
+ */
+static inline void *sq_prod_get_ptr(struct shared_queue *q)
+{
+ return sq_get_ptr(q, q->cached_prod++);
+}
+
+/*
+ * Claim the next slot if the ring has room, else return NULL.  The slot
+ * is not visible to the consumer until sq_prod_submit().
+ */
+static inline void *sq_prod_reserve(struct shared_queue *q)
+{
+ if (!sq_prod_space(q))
+ return NULL;
+
+ return sq_prod_get_ptr(q);
+}
+
+/*
+ * Publish all reserved slots to the consumer in one release store.
+ */
+static inline void sq_prod_submit(struct shared_queue *q)
+{
+ __sq_store_release_prod(q);
+}
--
2.24.1
next prev parent reply other threads:[~2020-06-18 16:10 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-18 16:09 [RFC PATCH 00/21] netgpu: networking between NIC and GPU/CPU Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 01/21] mm: add {add|release}_memory_pages Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 02/21] mm: Allow DMA mapping of pages which are not online Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 03/21] tcp: Pad TCP options out to a fixed size Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 04/21] mlx5: add definitions for header split and netgpu Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 05/21] mlx5/xsk: check that xsk does not conflict with netgpu Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 06/21] mlx5: add header_split flag Jonathan Lemon
2020-06-18 18:12 ` Eric Dumazet
2020-06-18 20:25 ` Michal Kubecek
2020-06-18 22:45 ` Eric Dumazet
2020-06-18 21:50 ` Jonathan Lemon
2020-06-18 22:34 ` Eric Dumazet
2020-06-18 22:36 ` Eric Dumazet
2020-06-18 16:09 ` [RFC PATCH 07/21] mlx5: remove the umem parameter from mlx5e_open_channel Jonathan Lemon
2020-06-18 16:09 ` Jonathan Lemon [this message]
2020-06-18 16:09 ` [RFC PATCH 09/21] include: add definitions for netgpu Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 10/21] mlx5: add netgpu queue functions Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 11/21] skbuff: add a zc_netgpu bitflag Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 12/21] mlx5: hook up the netgpu channel functions Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 13/21] netdevice: add SETUP_NETGPU to the netdev_bpf structure Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 14/21] kernel: export free_uid Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 15/21] netgpu: add network/gpu dma module Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 16/21] lib: have __zerocopy_sg_from_iter get netgpu pages for a sk Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 17/21] net/core: add the SO_REGISTER_DMA socket option Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 18/21] tcp: add MSG_NETDMA flag for sendmsg() Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 19/21] core: add page recycling logic for netgpu pages Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 20/21] core/skbuff: use skb_zdata for testing whether skb is zerocopy Jonathan Lemon
2020-06-18 16:09 ` [RFC PATCH 21/21] mlx5: add XDP_SETUP_NETGPU hook Jonathan Lemon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200618160941.879717-9-jonathan.lemon@gmail.com \
--to=jonathan.lemon@gmail.com \
--cc=axboe@kernel.dk \
--cc=kernel-team@fb.com \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).