From: Jeff Cody <jcody@redhat.com>
To: qemu-block@nongnu.org
Cc: peter.maydell@linaro.org, jcody@redhat.com, qemu-devel@nongnu.org
Subject: [Qemu-devel] [PULL 04/14] block/gluster: memory usage: use one glfs instance per volume
Date: Mon, 31 Oct 2016 15:22:51 -0400 [thread overview]
Message-ID: <1477941781-4108-5-git-send-email-jcody@redhat.com> (raw)
In-Reply-To: <1477941781-4108-1-git-send-email-jcody@redhat.com>
From: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
Currently, for every drive accessed via gfapi we create a new glfs
instance (a glfs_new() call followed by glfs_init()), which can consume
a few hundred MB of memory. From the table below, each instance adds
roughly 300 MB of VSZ.
Before:
-------
Disks   VSZ       RSS
1       1098728   187756
2       1430808   198656
3       1764932   199704
4       2084728   202684
This patch maintains a list of pre-opened glfs objects. When a new
drive belonging to an already-opened gluster volume is added, we reuse
the existing glfs object and bump its refcount. This avoids the
redundant glfs_new()/glfs_init() calls, and the memory they consume,
whenever a disk (file) belongs to a volume that is already open.
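For clarity, a minimal sketch of the reuse flow implemented below
(error handling and volfile-server setup omitted; example_open_volume()
is a made-up name, while the glfs_*_preopened() helpers are the ones
this patch adds to block/gluster.c):

    #include <glusterfs/api/glfs.h>

    static glfs_t *example_open_volume(const char *volume)
    {
        /* Reuse a cached connection if this volume was opened before
         * (glfs_find_preopened() bumps the refcount on a hit). */
        glfs_t *fs = glfs_find_preopened(volume);
        if (fs) {
            return fs;
        }

        /* First user of this volume: create, cache with ref = 1, init. */
        fs = glfs_new(volume);
        if (!fs) {
            return NULL;
        }
        glfs_set_preopened(volume, fs);

        if (glfs_init(fs) < 0) {
            /* Drops the refcount; glfs_fini() runs when it hits zero. */
            glfs_clear_preopened(fs);
            return NULL;
        }
        return fs;
    }

When a drive is closed, the matching glfs_clear_preopened() call
releases its reference, so the actual glfs_fini() only happens once the
last user of the volume goes away.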
The table below shows that the additional memory used when adding a
disk (which now reuses the existing glfs object) is negligible compared
to before.
After:
------
Disks   VSZ       RSS
1       1101964   185768
2       1109604   194920
3       1114012   196036
4       1114496   199868
Disks: number of -drive options
VSZ: virtual memory size of the process, in KiB
RSS: resident set size, the non-swapped physical memory, in KiB
VSZ and RSS were measured with the 'ps aux' utility.
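For example, numbers like the ones above can be collected with
something like the following (the process name is only a placeholder
for the actual QEMU binary being measured):

    # Print VSZ and RSS (both in KiB) of a running QEMU process;
    # adjust the name to the binary actually under test.
    ps -o vsz=,rss= -p "$(pgrep -f qemu-system-x86_64 | head -n1)"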
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 1477581890-4811-1-git-send-email-prasanna.kalever@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
block/gluster.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 80 insertions(+), 14 deletions(-)
diff --git a/block/gluster.c b/block/gluster.c
index 1735d12..40bd29c 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -56,6 +56,19 @@ typedef struct BDRVGlusterReopenState {
} BDRVGlusterReopenState;
+typedef struct GlfsPreopened {
+ char *volume;
+ glfs_t *fs;
+ int ref;
+} GlfsPreopened;
+
+typedef struct ListElement {
+ QLIST_ENTRY(ListElement) list;
+ GlfsPreopened saved;
+} ListElement;
+
+static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+
static QemuOptsList qemu_gluster_create_opts = {
.name = "qemu-gluster-create-opts",
.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -194,6 +207,57 @@ static QemuOptsList runtime_tcp_opts = {
},
};
+static void glfs_set_preopened(const char *volume, glfs_t *fs)
+{
+ ListElement *entry = NULL;
+
+ entry = g_new(ListElement, 1);
+
+ entry->saved.volume = g_strdup(volume);
+
+ entry->saved.fs = fs;
+ entry->saved.ref = 1;
+
+ QLIST_INSERT_HEAD(&glfs_list, entry, list);
+}
+
+static glfs_t *glfs_find_preopened(const char *volume)
+{
+ ListElement *entry = NULL;
+
+ QLIST_FOREACH(entry, &glfs_list, list) {
+ if (strcmp(entry->saved.volume, volume) == 0) {
+ entry->saved.ref++;
+ return entry->saved.fs;
+ }
+ }
+
+ return NULL;
+}
+
+static void glfs_clear_preopened(glfs_t *fs)
+{
+ ListElement *entry = NULL;
+
+ if (fs == NULL) {
+ return;
+ }
+
+ QLIST_FOREACH(entry, &glfs_list, list) {
+ if (entry->saved.fs == fs) {
+ if (--entry->saved.ref) {
+ return;
+ }
+
+ QLIST_REMOVE(entry, list);
+
+ glfs_fini(entry->saved.fs);
+ g_free(entry->saved.volume);
+ g_free(entry);
+ }
+ }
+}
+
static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
{
char *p, *q;
@@ -331,11 +395,18 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
int old_errno;
GlusterServerList *server;
+ glfs = glfs_find_preopened(gconf->volume);
+ if (glfs) {
+ return glfs;
+ }
+
glfs = glfs_new(gconf->volume);
if (!glfs) {
goto out;
}
+ glfs_set_preopened(gconf->volume, glfs);
+
for (server = gconf->server; server; server = server->next) {
if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
ret = glfs_set_volfile_server(glfs,
@@ -387,7 +458,7 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
out:
if (glfs) {
old_errno = errno;
- glfs_fini(glfs);
+ glfs_clear_preopened(glfs);
errno = old_errno;
}
return NULL;
@@ -767,9 +838,9 @@ out:
if (s->fd) {
glfs_close(s->fd);
}
- if (s->glfs) {
- glfs_fini(s->glfs);
- }
+
+ glfs_clear_preopened(s->glfs);
+
return ret;
}
@@ -836,9 +907,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
if (s->fd) {
glfs_close(s->fd);
}
- if (s->glfs) {
- glfs_fini(s->glfs);
- }
+
+ glfs_clear_preopened(s->glfs);
/* use the newly opened image / connection */
s->fd = reop_s->fd;
@@ -863,9 +933,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
glfs_close(reop_s->fd);
}
- if (reop_s->glfs) {
- glfs_fini(reop_s->glfs);
- }
+ glfs_clear_preopened(reop_s->glfs);
g_free(state->opaque);
state->opaque = NULL;
@@ -989,9 +1057,7 @@ static int qemu_gluster_create(const char *filename,
out:
g_free(tmp);
qapi_free_BlockdevOptionsGluster(gconf);
- if (glfs) {
- glfs_fini(glfs);
- }
+ glfs_clear_preopened(glfs);
return ret;
}
@@ -1064,7 +1130,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
glfs_close(s->fd);
s->fd = NULL;
}
- glfs_fini(s->glfs);
+ glfs_clear_preopened(s->glfs);
}
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
--
2.7.4