From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xen.org, Ian.Campbell@citrix.com,
	ian.jackson@eu.citrix.com, stefano.stabellini@eu.citrix.com,
	wei.liu2@citrix.com
Cc: Juergen Gross <jgross@suse.com>
Subject: [PATCH v2 3/3] xenstat: handle more than 1024 domains
Date: Mon,  4 Jan 2016 15:55:53 +0100	[thread overview]
Message-ID: <1451919353-11547-4-git-send-email-jgross@suse.com> (raw)
In-Reply-To: <1451919353-11547-1-git-send-email-jgross@suse.com>

get_domain_ids() in libxenstat, used by read_attributes_qdisk(), is
limited to 1024 domains. Remove that limit by querying the hypervisor
repeatedly in chunks of 1024 domains until no more are returned.
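
For reference, the chunked-query pattern reduced to a standalone
sketch (the list_all_domains() wrapper and the printf are illustrative
only; the patch open-codes the same loop inside read_attributes_qdisk()):

    #include <stdio.h>
    #include <xenctrl.h>

    #define CHUNK 1024

    /* Walk all domains in chunks of CHUNK instead of a single
     * fixed-size query, so more than 1024 domains are handled. */
    static void list_all_domains(xc_interface *xch)
    {
        xc_domaininfo_t dominfo[CHUNK];
        domid_t next_domid = 0;
        int i, num_doms;

        for (;;) {
            /* Fetch up to CHUNK domains starting at next_domid. */
            num_doms = xc_domain_getinfolist(xch, next_domid, CHUNK,
                                             dominfo);
            if (num_doms <= 0)
                return;    /* error, or no domains left */

            for (i = 0; i < num_doms; i++)
                printf("domain %u\n", dominfo[i].domain);

            /* Resume after the last domain seen in this chunk. */
            next_domid = dominfo[num_doms - 1].domain + 1;
        }
    }

Domain ids returned by xc_domain_getinfolist() are ascending, so
restarting at last-id + 1 visits each domain exactly once.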

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 tools/xenstat/libxenstat/src/xenstat_qmp.c | 67 +++++++++++++-----------------
 1 file changed, 29 insertions(+), 38 deletions(-)

diff --git a/tools/xenstat/libxenstat/src/xenstat_qmp.c b/tools/xenstat/libxenstat/src/xenstat_qmp.c
index 5e261af..5104afb 100644
--- a/tools/xenstat/libxenstat/src/xenstat_qmp.c
+++ b/tools/xenstat/libxenstat/src/xenstat_qmp.c
@@ -356,18 +356,6 @@ static int qmp_connect(char *path)
 	return s;
 }
 
-/* Get up to 1024 active domains */
-static xc_domaininfo_t *get_domain_ids(xc_interface *xc_handle, int *num_doms)
-{
-	xc_domaininfo_t *dominfo;
-
-	dominfo = calloc(1024, sizeof(xc_domaininfo_t));
-	if (dominfo == NULL)
-		return NULL;
-	*num_doms = xc_domain_getinfolist(xc_handle, 0, 1024, dominfo);
-	return dominfo;
-}
-
 /* Gather the qdisk statistics by querying QMP
    Resources: http://wiki.qemu.org/QMP and qmp-commands.hx from the qemu code
    QMP Syntax for entering command mode. This command must be issued before
@@ -398,44 +386,47 @@ void read_attributes_qdisk(xenstat_node * node)
 {
 	char *cmd_mode = "{ \"execute\": \"qmp_capabilities\" }";
 	char *query_blockstats_cmd = "{ \"execute\": \"query-blockstats\" }";
-	xc_domaininfo_t *dominfo = NULL;
+	xc_domaininfo_t dominfo[1024];
 	unsigned char *qmp_stats, *val;
 	char path[80];
 	int i, qfd, num_doms;
+	domid_t next_domid = 0;
 
-	dominfo = get_domain_ids(node->handle->xc_handle, &num_doms);
-	if (dominfo == NULL)
-		return;
+	for (;;) {
+		num_doms = xc_domain_getinfolist(node->handle->xc_handle, next_domid, 1024, dominfo);
+		if (num_doms <= 0)
+			return;
 
-	for (i=0; i<num_doms; i++) {
-		if (dominfo[i].domain <= 0)
-			continue;
+		for (i=0; i<num_doms; i++) {
+			if (dominfo[i].domain <= 0)
+				continue;
 
-		/* Verify that qdisk disks are used with this VM */
-		snprintf(path, sizeof(path),"/local/domain/0/backend/qdisk/%i", dominfo[i].domain);
-		if ((val = xs_read(node->handle->xshandle, XBT_NULL, path, NULL)) == NULL)
-			continue;
-		free(val);
+			/* Verify that qdisk disks are used with this VM */
+			snprintf(path, sizeof(path),"/local/domain/0/backend/qdisk/%i", dominfo[i].domain);
+			if ((val = xs_read(node->handle->xshandle, XBT_NULL, path, NULL)) == NULL)
+				continue;
+			free(val);
 
-		/* Connect to this VMs QMP socket */
-		snprintf(path, sizeof(path), "/var/run/xen/qmp-libxenstat-%i", dominfo[i].domain);
-		if ((qfd = qmp_connect(path)) < 0) {
-			continue;
-		}
+			/* Connect to this VMs QMP socket */
+			snprintf(path, sizeof(path), "/var/run/xen/qmp-libxenstat-%i", dominfo[i].domain);
+			if ((qfd = qmp_connect(path)) < 0) {
+				continue;
+			}
 
-		/* First enable QMP capabilities so that we can query for data */
-		if ((qmp_stats = qmp_query(qfd, cmd_mode)) != NULL) {
-			free(qmp_stats);
-			/* Query QMP for this VMs blockstats */
-			if ((qmp_stats = qmp_query(qfd, query_blockstats_cmd)) != NULL) {
-				qmp_parse_stats(node, dominfo[i].domain, qmp_stats, qfd);
+			/* First enable QMP capabilities so that we can query for data */
+			if ((qmp_stats = qmp_query(qfd, cmd_mode)) != NULL) {
 				free(qmp_stats);
+				/* Query QMP for this VMs blockstats */
+				if ((qmp_stats = qmp_query(qfd, query_blockstats_cmd)) != NULL) {
+					qmp_parse_stats(node, dominfo[i].domain, qmp_stats, qfd);
+					free(qmp_stats);
+				}
 			}
+			close(qfd);
 		}
-		close(qfd);
-	}
 
-	free(dominfo);
+		next_domid = dominfo[num_doms - 1].domain + 1;
+	}
 }
 
 #else /* !HAVE_YAJL_V2 */
-- 
2.6.2


Thread overview: 11+ messages
2016-01-04 14:55 [PATCH v2 0/3] tools: remove 1024 domain limit at some places Juergen Gross
2016-01-04 14:55 ` [PATCH v2 1/3] libxl: remove the xl list limit of 1024 domains Juergen Gross
2016-01-04 16:38   ` Wei Liu
2016-01-04 14:55 ` [PATCH v2 2/3] libxl: base libxl_list_vm() on libxl_list_domain() Juergen Gross
2016-01-04 16:38   ` Wei Liu
2016-01-04 14:55 ` Juergen Gross [this message]
2016-01-04 16:38   ` [PATCH v2 3/3] xenstat: handle more than 1024 domains Wei Liu
2016-01-15 15:35     ` Ian Campbell
2016-01-15 15:39       ` Juergen Gross
2016-01-15 15:03 ` [PATCH v2 0/3] tools: remove 1024 domain limit at some places Juergen Gross
2016-01-15 15:55 ` Ian Campbell
