From: Anchal Agarwal <anchalag@amazon.com>
To: tglx@linutronix.de, mingo@redhat.com, hpa@zytor.com, x86@kernel.org
Cc: jgross@suse.com, len.brown@intel.com, eduval@amazon.com,
vallish@amazon.com, netdev@vger.kernel.org, fllinden@amazon.com,
kamatam@amazon.com, rjw@rjwysocki.net,
linux-kernel@vger.kernel.org, anchalag@amazon.com,
cyberax@amazon.com, pavel@ucw.cz, linux-pm@vger.kernel.org,
xen-devel@lists.xenproject.org, boris.ostrovsky@oracle.com,
guruanb@amazon.com, roger.pau@citrix.com
Subject: [RFC PATCH 07/12] xen-netfront: add callbacks for PM suspend and hibernation support
Date: Tue, 12 Jun 2018 20:56:14 +0000
Message-ID: <20180612205619.28156-8-anchalag@amazon.com>
In-Reply-To: <20180612205619.28156-1-anchalag@amazon.com>
From: Munehisa Kamata <kamatam@amazon.com>
Add freeze and restore callbacks for PM suspend and hibernation support.
The freeze handler simply disconnects the frontend from the backend and
frees the resources associated with the queues after disabling the
net_device from the system. The restore handler just changes the frontend
state and lets the xenbus handler re-allocate the resources and re-connect
to the backend. This can be performed transparently to the rest of the
system.

The handlers are used for both PM suspend and hibernation so that we can
keep the existing suspend/resume callbacks for Xen suspend without
modification.

Freezing netfront devices is normally expected to finish within a few
hundred milliseconds, but in rare cases it can take more than 5 seconds
and hit the hard-coded timeout, depending on the backend state, which may
be congested and/or have a complex configuration. Although such cases are
rare, a longer default timeout seems more reasonable here to avoid hitting
the timeout. Also, make the timeout configurable via a module parameter so
that we can cover broader setups than the ones we currently know of.
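
For illustration only (not part of the patch), the timeout could then be
adjusted through the standard module-parameter interfaces; the value of
30 seconds below is just an example:

  # at module load time
  modprobe xen-netfront freeze_timeout_secs=30

  # or at runtime via sysfs, since the parameter is writable (0644)
  echo 30 > /sys/module/xen_netfront/parameters/freeze_timeout_secs
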
Signed-off-by: Munehisa Kamata <kamatam@amazon.com>
Signed-off-by: Anchal Agarwal <anchalag@amazon.com>
Reviewed-by: Eduardo Valentin <eduval@amazon.com>
Reviewed-by: Munehisa Kamata <kamatam@amazon.com>
---
drivers/net/xen-netfront.c | 97 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 96 insertions(+), 1 deletion(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4dd0668..4ea9284 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -43,6 +43,7 @@
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/completion.h>
#include <net/ip.h>
#include <xen/xen.h>
@@ -56,6 +57,12 @@
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
+enum netif_freeze_state {
+ NETIF_FREEZE_STATE_UNFROZEN,
+ NETIF_FREEZE_STATE_FREEZING,
+ NETIF_FREEZE_STATE_FROZEN,
+};
+
/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
@@ -63,6 +70,12 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+static unsigned int netfront_freeze_timeout_secs = 10;
+module_param_named(freeze_timeout_secs,
+ netfront_freeze_timeout_secs, uint, 0644);
+MODULE_PARM_DESC(freeze_timeout_secs,
+ "timeout when freezing netfront device in seconds");
+
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
@@ -160,6 +173,10 @@ struct netfront_info {
struct netfront_stats __percpu *tx_stats;
atomic_t rx_gso_checksum_fixup;
+
+ int freeze_state;
+
+ struct completion wait_backend_disconnected;
};
struct netfront_rx_info {
@@ -723,6 +740,21 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static int xennet_disable_interrupts(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int i;
+ struct netfront_queue *queue;
+
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ disable_irq(queue->tx_irq);
+ disable_irq(queue->rx_irq);
+ }
+ return 0;
+}
+
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
grant_ref_t ref)
{
@@ -1296,6 +1328,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->queues = NULL;
+ init_completion(&np->wait_backend_disconnected);
+
err = -ENOMEM;
np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->rx_stats == NULL)
@@ -1782,6 +1816,50 @@ static int xennet_create_queues(struct netfront_info *info,
return 0;
}
+static int netfront_freeze(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev_get_drvdata(&dev->dev);
+ unsigned long timeout = netfront_freeze_timeout_secs * HZ;
+ int err = 0;
+
+ xennet_disable_interrupts(info->netdev);
+
+ netif_device_detach(info->netdev);
+
+ info->freeze_state = NETIF_FREEZE_STATE_FREEZING;
+
+ /* Kick the backend to disconnect */
+ xenbus_switch_state(dev, XenbusStateClosing);
+
+ /* We don't want to move forward before the frontend is disconnected
+ * from the backend cleanly.
+ */
+ timeout = wait_for_completion_timeout(&info->wait_backend_disconnected,
+ timeout);
+ if (!timeout) {
+ err = -EBUSY;
+ xenbus_dev_error(dev, err, "Freezing timed out; "
+ "the device may be left in an inconsistent state");
+ return err;
+ }
+
+ /* Tear down queues */
+ xennet_disconnect_backend(info);
+ xennet_destroy_queues(info);
+
+ info->freeze_state = NETIF_FREEZE_STATE_FROZEN;
+
+ return err;
+}
+
+static int netfront_restore(struct xenbus_device *dev)
+{
+ /* Kick the backend to re-connect */
+ xenbus_switch_state(dev, XenbusStateInitialising);
+
+ return 0;
+}
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
struct netfront_info *info)
@@ -1986,6 +2064,8 @@ static int xennet_connect(struct net_device *dev)
spin_unlock_bh(&queue->rx_lock);
}
+ np->freeze_state = NETIF_FREEZE_STATE_UNFROZEN;
+
return 0;
}
@@ -2025,11 +2105,23 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
wake_up_all(&module_unload_q);
- if (dev->state == XenbusStateClosed)
+ if (dev->state == XenbusStateClosed) {
+ /* dpm context is waiting for the backend */
+ if (np->freeze_state == NETIF_FREEZE_STATE_FREEZING)
+ complete(&np->wait_backend_disconnected);
break;
+ }
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
wake_up_all(&module_unload_q);
+ /* We may see an unexpected Closed or Closing from the backend.
+ * Ignore it so as not to prevent the frontend from being
+ * re-connected in the case of PM suspend or hibernation.
+ */
+ if (np->freeze_state == NETIF_FREEZE_STATE_FROZEN &&
+ dev->state == XenbusStateInitialising) {
+ break;
+ }
xenbus_frontend_closed(dev);
break;
}
@@ -2176,6 +2268,9 @@ static struct xenbus_driver netfront_driver = {
.probe = netfront_probe,
.remove = xennet_remove,
.resume = netfront_resume,
+ .freeze = netfront_freeze,
+ .thaw = netfront_restore,
+ .restore = netfront_restore,
.otherend_changed = netback_changed,
};
--
2.7.4