From: Jes.Sorensen@redhat.com
To: qemu-devel@nongnu.org
Cc: lcapitulino@redhat.com, badari@us.ibm.com,
mdroth@linux.vnet.ibm.com, stefanha@linux.vnet.ibm.com,
agl@us.ibm.com
Subject: [Qemu-devel] [PATCH 2/2] Add monitor commands for fsfreeze support
Date: Fri, 4 Feb 2011 11:57:37 +0100
Message-ID: <1296817057-22455-3-git-send-email-Jes.Sorensen@redhat.com>
In-Reply-To: <1296817057-22455-1-git-send-email-Jes.Sorensen@redhat.com>
From: Jes Sorensen <Jes.Sorensen@redhat.com>
This patch adds the following monitor commands:

agent_fsfreeze:
- Freezes all local file systems in the guest. The command prints
  the number of file systems that were frozen.

agent_fsthaw:
- Thaws all local file systems in the guest. The command prints
  the number of file systems that were thawed.

agent_fsstatus:
- Prints the current freeze status of the file systems in the guest:
  thawed, frozen, freeze in progress, thaw in progress, or error.
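
For illustration, a possible HMP session (the file-system count shown
here is hypothetical and depends on the guest; the output strings match
the print handlers in the patch below):

    (qemu) agent_fsfreeze
    File systems frozen: 3
    (qemu) agent_fsstatus
    File systems freeze status: Frozen
    (qemu) agent_fsthaw
    File systems thawed: 3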
Signed-off-by: Jes Sorensen <Jes.Sorensen@redhat.com>
---
hmp-commands.hx | 48 +++++++++++
virtagent-common.h | 1 +
virtagent.c | 235 ++++++++++++++++++++++++++++++++++++++++++++++++++++
virtagent.h | 9 ++
4 files changed, 293 insertions(+), 0 deletions(-)
diff --git a/hmp-commands.hx b/hmp-commands.hx
index 9c7ac0b..f4150da 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1310,6 +1310,54 @@ STEXI
Fetch and re-negotiate guest agent capabilties
ETEXI
+ {
+ .name = "agent_fsfreeze",
+ .args_type = "",
+ .params = "",
+ .help = "Freeze all local file systems mounted in the guest",
+ .user_print = do_agent_fsfreeze_print,
+ .mhandler.cmd_async = do_agent_fsfreeze,
+ .flags = MONITOR_CMD_ASYNC,
+ },
+
+STEXI
+@item agent_fsfreeze
+@findex agent_fsfreeze
+Freeze all locally mounted file systems in the guest
+ETEXI
+
+ {
+ .name = "agent_fsthaw",
+ .args_type = "",
+ .params = "",
+ .help = "Thaw all local file systems mounted in the guest",
+ .user_print = do_agent_fsthaw_print,
+ .mhandler.cmd_async = do_agent_fsthaw,
+ .flags = MONITOR_CMD_ASYNC,
+ },
+
+STEXI
+@item agent_fsthaw
+@findex agent_fsthaw
+Thaw all locally mounted file systems in the guest
+ETEXI
+
+ {
+ .name = "agent_fsstatus",
+ .args_type = "",
+ .params = "",
+ .help = "Display status of file system freeze progress in guest",
+ .user_print = do_agent_fsstatus_print,
+ .mhandler.cmd_async = do_agent_fsstatus,
+ .flags = MONITOR_CMD_ASYNC,
+ },
+
+STEXI
+@item agent_fsstatus
+@findex agent_fsstatus
+Get status of file system freeze in guest
+ETEXI
+
STEXI
@end table
ETEXI
diff --git a/virtagent-common.h b/virtagent-common.h
index 7c6d9ef..ff7bf23 100644
--- a/virtagent-common.h
+++ b/virtagent-common.h
@@ -24,6 +24,7 @@
#include "monitor.h"
#include "virtagent-server.h"
#include "virtagent.h"
+#include "qint.h"
#define DEBUG_VA
diff --git a/virtagent.c b/virtagent.c
index b5e7944..4277802 100644
--- a/virtagent.c
+++ b/virtagent.c
@@ -640,3 +640,238 @@ int va_send_hello(void)
xmlrpc_DECREF(params);
return ret;
}
+
+void do_agent_fsfreeze_print(Monitor *mon, const QObject *data)
+{
+ TRACE("called");
+
+ monitor_printf(mon, "File systems frozen: %" PRId64 "\n",
+ qint_get_int((qobject_to_qint(data))));
+}
+
+static void do_agent_fsfreeze_cb(const char *resp_data,
+ size_t resp_data_len,
+ MonitorCompletion *mon_cb,
+ void *mon_data)
+{
+ xmlrpc_value *resp = NULL;
+ xmlrpc_env env;
+ xmlrpc_int32 retval = 0;
+ QInt *qint;
+
+ TRACE("called");
+
+ if (resp_data == NULL) {
+ LOG("error handling RPC request");
+ return;
+ }
+
+ xmlrpc_env_init(&env);
+ resp = xmlrpc_parse_response(&env, resp_data, resp_data_len);
+ if (va_rpc_has_error(&env)) {
+ LOG("error parsing RPC response");
+ return;
+ }
+
+ xmlrpc_parse_value(&env, resp, "i", &retval);
+ if (va_rpc_has_error(&env)) {
+ retval = -1;
+ goto out;
+ }
+
+out:
+ qint = qint_from_int(retval);
+ xmlrpc_DECREF(resp);
+ if (mon_cb) {
+ mon_cb(mon_data, QOBJECT(qint));
+ }
+ qobject_decref(QOBJECT(qint));
+}
+
+int do_agent_fsfreeze(Monitor *mon, const QDict *mon_params,
+ MonitorCompletion cb, void *opaque)
+{
+ xmlrpc_env env;
+ xmlrpc_value *params;
+ int ret;
+
+ TRACE("called");
+
+ xmlrpc_env_init(&env);
+ params = xmlrpc_build_value(&env, "()");
+ if (va_rpc_has_error(&env)) {
+ return -1;
+ }
+
+ ret = va_do_rpc(&env, "va.fsfreeze", params, do_agent_fsfreeze_cb,
+ cb, opaque);
+ if (ret) {
+ qerror_report(QERR_VA_FAILED, ret, strerror(ret));
+ }
+ xmlrpc_DECREF(params);
+ return ret;
+}
+
+void do_agent_fsthaw_print(Monitor *mon, const QObject *data)
+{
+ TRACE("called");
+
+ monitor_printf(mon, "File systems thawed: %" PRId64 "\n",
+ qint_get_int((qobject_to_qint(data))));
+}
+
+static void do_agent_fsthaw_cb(const char *resp_data,
+ size_t resp_data_len,
+ MonitorCompletion *mon_cb,
+ void *mon_data)
+{
+ xmlrpc_value *resp = NULL;
+ xmlrpc_env env;
+ xmlrpc_int32 retval = 0;
+ QInt *qint;
+
+ TRACE("called");
+
+ if (resp_data == NULL) {
+ LOG("error handling RPC request");
+ return;
+ }
+
+ xmlrpc_env_init(&env);
+ resp = xmlrpc_parse_response(&env, resp_data, resp_data_len);
+ if (va_rpc_has_error(&env)) {
+ LOG("error parsing RPC response");
+ return;
+ }
+
+ xmlrpc_parse_value(&env, resp, "i", &retval);
+ if (va_rpc_has_error(&env)) {
+ retval = -1;
+ goto out;
+ }
+
+out:
+ qint = qint_from_int(retval);
+ xmlrpc_DECREF(resp);
+ if (mon_cb) {
+ mon_cb(mon_data, QOBJECT(qint));
+ }
+ qobject_decref(QOBJECT(qint));
+}
+
+int do_agent_fsthaw(Monitor *mon, const QDict *mon_params,
+ MonitorCompletion cb, void *opaque)
+{
+ xmlrpc_env env;
+ xmlrpc_value *params;
+ int ret;
+
+ TRACE("called");
+
+ xmlrpc_env_init(&env);
+ params = xmlrpc_build_value(&env, "()");
+ if (va_rpc_has_error(&env)) {
+ return -1;
+ }
+
+ ret = va_do_rpc(&env, "va.fsthaw", params, do_agent_fsthaw_cb, cb, opaque);
+ if (ret) {
+ qerror_report(QERR_VA_FAILED, ret, strerror(ret));
+ }
+ xmlrpc_DECREF(params);
+ return ret;
+}
+
+void do_agent_fsstatus_print(Monitor *mon, const QObject *data)
+{
+ int64_t fsstatus;
+ TRACE("called");
+
+ fsstatus = qint_get_int((qobject_to_qint(data)));
+
+ monitor_printf(mon, "File systems freeze status: ");
+ switch(fsstatus) {
+ case FREEZE_THAWED:
+ monitor_printf(mon, "Thawed");
+ break;
+ case FREEZE_INPROGRESS:
+ monitor_printf(mon, "Freeze in progress");
+ break;
+ case FREEZE_THAWINPROGRESS:
+ monitor_printf(mon, "Thaw in progress");
+ break;
+ case FREEZE_FROZEN:
+ monitor_printf(mon, "Frozen");
+ break;
+ case FREEZE_ERROR:
+ monitor_printf(mon, "Error");
+ break;
+ default:
+ monitor_printf(mon, "unknown");
+ }
+
+ monitor_printf(mon, "\n");
+}
+
+static void do_agent_fsstatus_cb(const char *resp_data,
+ size_t resp_data_len,
+ MonitorCompletion *mon_cb,
+ void *mon_data)
+{
+ xmlrpc_value *resp = NULL;
+ xmlrpc_env env;
+ xmlrpc_int32 retval = 0;
+ QInt *qint;
+
+ TRACE("called");
+
+ if (resp_data == NULL) {
+ LOG("error handling RPC request");
+ return;
+ }
+
+ xmlrpc_env_init(&env);
+ resp = xmlrpc_parse_response(&env, resp_data, resp_data_len);
+ if (va_rpc_has_error(&env)) {
+ LOG("error parsing RPC response");
+ return;
+ }
+
+ xmlrpc_parse_value(&env, resp, "i", &retval);
+ if (va_rpc_has_error(&env)) {
+ retval = -1;
+ goto out;
+ }
+
+out:
+ qint = qint_from_int(retval);
+ xmlrpc_DECREF(resp);
+ if (mon_cb) {
+ mon_cb(mon_data, QOBJECT(qint));
+ }
+ qobject_decref(QOBJECT(qint));
+}
+
+int do_agent_fsstatus(Monitor *mon, const QDict *mon_params,
+ MonitorCompletion cb, void *opaque)
+{
+ xmlrpc_env env;
+ xmlrpc_value *params;
+ int ret;
+
+ TRACE("called");
+
+ xmlrpc_env_init(&env);
+ params = xmlrpc_build_value(&env, "()");
+ if (va_rpc_has_error(&env)) {
+ return -1;
+ }
+
+ ret = va_do_rpc(&env, "va.fsstatus", params, do_agent_fsstatus_cb,
+ cb, opaque);
+ if (ret) {
+ qerror_report(QERR_VA_FAILED, ret, strerror(ret));
+ }
+ xmlrpc_DECREF(params);
+ return ret;
+}
diff --git a/virtagent.h b/virtagent.h
index dba90d0..0d7575d 100644
--- a/virtagent.h
+++ b/virtagent.h
@@ -46,5 +46,14 @@ int do_agent_capabilities(Monitor *mon, const QDict *mon_params,
MonitorCompletion cb, void *opaque);
int va_client_init_capabilities(void);
int va_send_hello(void);
+void do_agent_fsfreeze_print(Monitor *mon, const QObject *qobject);
+int do_agent_fsfreeze(Monitor *mon, const QDict *mon_params,
+ MonitorCompletion cb, void *opaque);
+void do_agent_fsthaw_print(Monitor *mon, const QObject *qobject);
+int do_agent_fsthaw(Monitor *mon, const QDict *mon_params,
+ MonitorCompletion cb, void *opaque);
+void do_agent_fsstatus_print(Monitor *mon, const QObject *qobject);
+int do_agent_fsstatus(Monitor *mon, const QDict *mon_params,
+ MonitorCompletion cb, void *opaque);
#endif /* VIRTAGENT_H */
--
1.7.3.5