From: Joao Martins <joao.martins@neclab.eu>
To: <xen-devel@lists.xenproject.org>, <netdev@vger.kernel.org>
Cc: wei.liu2@citrix.com, ian.campbell@citrix.com,
Joao Martins <joao.martins@neclab.eu>,
david.vrabel@citrix.com, boris.ostrovsky@oracle.com
Subject: [RFC PATCH 01/13] xen-netback: add persistent grant tree ops
Date: Tue, 12 May 2015 19:18:25 +0200
Message-ID: <1431451117-70051-2-git-send-email-joao.martins@neclab.eu>
In-Reply-To: <1431451117-70051-1-git-send-email-joao.martins@neclab.eu>
Implement the routines needed to manage the persistent grant tree. These
routines are ported from the blkback driver and slightly modified to be
more generic. This patch is kept separate because the code could be
shared with other drivers, should they also adopt persistent grants.
The changes compared to blkback are: a struct persistent_gnt_tree is
declared to hold the grant tree state, so these routines take a tree
argument rather than a driver-private data structure. The tree also owns
a pool of free pages to be used for grant maps added to the tree. Since
we cannot sleep in xenvif_tx_action/xenvif_start_xmit, this pool is
prefilled with Xen ballooned pages when the tree is initialized.
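For illustration only (not part of this patch), the tree could be set up
roughly as follows; xenvif_init_persistent_tree is a hypothetical helper
and the use of gnttab_alloc_pages() to prefill the pool is an assumption
of this sketch:

	/* Hypothetical init helper: prefill the free page pool with
	 * ballooned pages so grant maps in the xmit path never sleep.
	 */
	static int xenvif_init_persistent_tree(struct persistent_gnt_tree *tree,
					       unsigned int max_grants)
	{
		struct page **pages;

		tree->root = RB_ROOT;
		atomic_set(&tree->gnt_in_use, 0);
		tree->gnt_c = 0;
		tree->gnt_max = max_grants;
		tree->overflow = false;
		INIT_LIST_HEAD(&tree->free_pages);
		tree->free_pages_num = 0;

		pages = kcalloc(max_grants, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;
		if (gnttab_alloc_pages(max_grants, pages)) {
			kfree(pages);
			return -ENOMEM;
		}
		put_free_pages(tree, pages, max_grants);
		kfree(pages);
		return 0;
	}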
Regarding the *_persistent_gnt API changes: get_persistent_gnt() now
returns ERR_PTR(-EBUSY) when asked for a grant ref that is already in
use. This is useful for netback, since it lets us fall back to map/unmap
whenever we fetch a grant that is already in use. This saves a map (plus
an unmap on error) and avoids the add_persistent_gnt() failure that would
otherwise lead to dropping the packet.
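For illustration only, a TX caller could consume the lookup result as in
the following sketch (xenvif_tx_find_pgrant and the fallback flag are
hypothetical names, not part of this patch):

	/* Hypothetical caller: return the persistent page for gref, or
	 * NULL with *fallback set when the slot must use map/unmap.
	 */
	static struct page *xenvif_tx_find_pgrant(struct persistent_gnt_tree *tree,
						  grant_ref_t gref, bool *fallback)
	{
		struct persistent_gnt *pgnt = get_persistent_gnt(tree, gref);

		if (IS_ERR(pgnt)) {
			/* Grant already in use: fall back to map/unmap
			 * rather than failing and dropping the packet.
			 */
			*fallback = true;
			return NULL;
		}
		if (!pgnt) {
			/* Not tracked yet: the caller maps the grant and
			 * calls add_persistent_gnt(), unless tree->overflow
			 * is set, in which case it also falls back.
			 */
			*fallback = true;
			return NULL;
		}
		*fallback = false;
		return pgnt->page;
	}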
Signed-off-by: Joao Martins <joao.martins@neclab.eu>
---
drivers/net/xen-netback/common.h | 57 +++++++++++++++
drivers/net/xen-netback/netback.c | 145 ++++++++++++++++++++++++++++++++++++++
2 files changed, 202 insertions(+)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a495b3..dd02386 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -106,6 +106,48 @@ struct xenvif_rx_meta {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+/* Number of available flags */
+#define PERSISTENT_GNT_FLAGS_SIZE 2
+/* This persistent grant is currently in use */
+#define PERSISTENT_GNT_ACTIVE 0
+/* This persistent grant has been used; the flag is set when we clear
+ * PERSISTENT_GNT_ACTIVE, to record that this grant was used recently.
+ */
+#define PERSISTENT_GNT_WAS_ACTIVE 1
+
+struct persistent_gnt {
+ struct page *page; /* mapped page */
+ grant_ref_t gnt;
+ grant_handle_t handle;
+ DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
+ struct rb_node node;
+};
+
+struct persistent_gnt_tree {
+ /* Tree to store persistent grants */
+ struct rb_root root;
+
+ /* Number of grants in use */
+ atomic_t gnt_in_use;
+
+ /* Number of grants in the tree */
+ unsigned int gnt_c;
+
+ /* Maximum number of grants in the tree */
+ unsigned int gnt_max;
+
+ /* True if we reached maximum number of
+ * persistent grants in the tree
+ */
+ bool overflow;
+
+ /* Free pages for grant maps */
+ struct list_head free_pages;
+
+ /* Initialized with <gnt_max> pages */
+ unsigned int free_pages_num;
+};
+
struct xenvif;
struct xenvif_stats {
@@ -224,6 +266,7 @@ struct xenvif {
u8 can_sg:1;
u8 ip_csum:1;
u8 ipv6_csum:1;
+ u8 persistent_grants:1;
/* Is this interface disabled? True when backend discovers
* frontend is rogue.
@@ -344,4 +387,18 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
+/* tree ops for persistent grants */
+struct persistent_gnt *get_persistent_gnt(struct persistent_gnt_tree *tree,
+ grant_ref_t gref);
+int add_persistent_gnt(struct persistent_gnt_tree *tree,
+ struct persistent_gnt *persistent_gnt);
+void put_persistent_gnt(struct persistent_gnt_tree *tree,
+ struct persistent_gnt *persistent_gnt);
+void free_persistent_gnts(struct persistent_gnt_tree *tree, unsigned int num);
+/* Gets one page from the free pool in the tree */
+int get_free_page(struct persistent_gnt_tree *tree, struct page **page);
+/* Adds pages to the free pool in the tree */
+void put_free_pages(struct persistent_gnt_tree *tree, struct page **page,
+ int num);
+
#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4de46aa..8df0a73 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -107,6 +107,151 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue
u16 size,
u16 flags);
+#define foreach_grant_safe(pos, n, rbtree, node) \
+ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+ (n) = (&(pos)->node) ? rb_next(&(pos)->node) : NULL; \
+ &(pos)->node; \
+ (pos) = container_of(n, typeof(*(pos)), node), \
+ (n) = (&(pos)->node) ? rb_next(&(pos)->node) : NULL)
+
+int add_persistent_gnt(struct persistent_gnt_tree *tree,
+ struct persistent_gnt *persistent_gnt)
+{
+ struct rb_node **new = NULL, *parent = NULL;
+ struct persistent_gnt *this;
+
+ if (tree->gnt_c >= tree->gnt_max) {
+ pr_err("Using maximum number of peristent grants\n");
+ tree->overflow = true;
+ return -EBUSY;
+ }
+ /* Figure out where to put new node */
+ new = &tree->root.rb_node;
+ while (*new) {
+ this = container_of(*new, struct persistent_gnt, node);
+
+ parent = *new;
+ if (persistent_gnt->gnt < this->gnt) {
+ new = &((*new)->rb_left);
+ } else if (persistent_gnt->gnt > this->gnt) {
+ new = &((*new)->rb_right);
+ } else {
+ pr_err("Trying to add a gref that's already in the tree\n");
+ return -EINVAL;
+ }
+ }
+
+ bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
+ set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+ /* Add new node and rebalance tree. */
+ rb_link_node(&persistent_gnt->node, parent, new);
+ rb_insert_color(&persistent_gnt->node, &tree->root);
+ tree->gnt_c++;
+ atomic_inc(&tree->gnt_in_use);
+ return 0;
+}
+
+struct persistent_gnt *get_persistent_gnt(struct persistent_gnt_tree *tree,
+ grant_ref_t gref)
+{
+ struct persistent_gnt *data;
+ struct rb_node *node = NULL;
+
+ node = tree->root.rb_node;
+ while (node) {
+ data = container_of(node, struct persistent_gnt, node);
+
+ if (gref < data->gnt) {
+ node = node->rb_left;
+ } else if (gref > data->gnt) {
+ node = node->rb_right;
+ } else {
+ if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+ pr_err("Requesting a grant already in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+ set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+ atomic_inc(&tree->gnt_in_use);
+ return data;
+ }
+ }
+ return NULL;
+}
+
+void put_persistent_gnt(struct persistent_gnt_tree *tree,
+ struct persistent_gnt *persistent_gnt)
+{
+ if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+ pr_alert("Freeing a grant already unused\n");
+ set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+ clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+ atomic_dec(&tree->gnt_in_use);
+}
+
+void free_persistent_gnts(struct persistent_gnt_tree *tree, unsigned int num)
+
+{
+ struct gnttab_unmap_grant_ref unmap[FATAL_SKB_SLOTS_DEFAULT];
+ struct page *pages[FATAL_SKB_SLOTS_DEFAULT];
+ struct persistent_gnt *persistent_gnt;
+ struct rb_root *root = &tree->root;
+ struct rb_node *n;
+ int ret = 0;
+ int pages_to_unmap = 0;
+ void *addr;
+
+ foreach_grant_safe(persistent_gnt, n, root, node) {
+ BUG_ON(persistent_gnt->handle ==
+ NETBACK_INVALID_HANDLE);
+
+ addr = pfn_to_kaddr(page_to_pfn(persistent_gnt->page));
+ gnttab_set_unmap_op(&unmap[pages_to_unmap],
+ (unsigned long)addr,
+ GNTMAP_host_map | GNTMAP_readonly,
+ persistent_gnt->handle);
+
+ pages[pages_to_unmap] = persistent_gnt->page;
+
+ if (++pages_to_unmap == FATAL_SKB_SLOTS_DEFAULT ||
+ !rb_next(&persistent_gnt->node)) {
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ pages_to_unmap);
+ BUG_ON(ret);
+ put_free_pages(tree, pages, pages_to_unmap);
+ pages_to_unmap = 0;
+ }
+
+ rb_erase(&persistent_gnt->node, root);
+ kfree(persistent_gnt);
+ num--;
+ }
+ BUG_ON(num != 0);
+}
+
+int get_free_page(struct persistent_gnt_tree *tree,
+ struct page **page)
+{
+ if (list_empty(&tree->free_pages)) {
+ BUG_ON(tree->free_pages_num != 0);
+ return 1;
+ }
+ BUG_ON(tree->free_pages_num == 0);
+ page[0] = list_first_entry(&tree->free_pages, struct page, lru);
+ list_del(&page[0]->lru);
+ tree->free_pages_num--;
+ return 0;
+}
+
+void put_free_pages(struct persistent_gnt_tree *tree,
+ struct page **page, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ list_add(&page[i]->lru, &tree->free_pages);
+ tree->free_pages_num += num;
+}
+
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
u16 idx)
{
--
2.1.3