linux-mm.kvack.org archive mirror
From: Wanpeng Li <liwanp@linux.vnet.ibm.com>
To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>,
	Seth Jennings <sjenning@linux.vnet.ibm.com>,
	Konrad Rzeszutek Wilk <konrad@darnok.org>,
	Minchan Kim <minchan@kernel.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	Bob Liu <bob.liu@oracle.com>,
	Wanpeng Li <liwanp@linux.vnet.ibm.com>
Subject: [PATCH PART3 v3 4/6] staging: ramster: Add incremental accessory counters
Date: Sat, 13 Apr 2013 08:36:08 +0800
Message-ID: <1365813371-19006-4-git-send-email-liwanp@linux.vnet.ibm.com>
In-Reply-To: <1365813371-19006-1-git-send-email-liwanp@linux.vnet.ibm.com>

Add inline accessor functions that increment the counters used by the
debugfs entries.

Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
 drivers/staging/zcache/ramster/debug.h   |   67 ++++++++++++++++++++++++++++++
 drivers/staging/zcache/ramster/ramster.c |   32 +++++++-------
 2 files changed, 83 insertions(+), 16 deletions(-)
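
For reference, below is a minimal, self-contained sketch of the pattern these
wrappers follow: a file-scope counter, an inline incrementer, and a read-only
debugfs entry created with debugfs_create_size_t(). The "example" names are
hypothetical and are not part of this patch.

#include <linux/debugfs.h>
#include <linux/errno.h>

/* Hypothetical counter, analogous to ramster_eph_pages_remoted. */
static size_t example_pages_remoted;

/* Inline wrapper like the inc_ramster_*() helpers below; a no-debug
 * build can supply an empty stub with the same name instead. */
static inline void inc_example_pages_remoted(void)
{
	example_pages_remoted++;
}

/* Wire the counter to a read-only debugfs file, as a routine such as
 * ramster_debugfs_init() would. */
static int example_debugfs_init(void)
{
	struct dentry *root = debugfs_create_dir("example", NULL);

	if (!root)
		return -ENODEV;
	debugfs_create_size_t("pages_remoted", 0444, root,
			      &example_pages_remoted);
	return 0;
}

With that in place, call sites simply invoke inc_example_pages_remoted(),
which is exactly the conversion applied to ramster.c in the hunks below.
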

diff --git a/drivers/staging/zcache/ramster/debug.h b/drivers/staging/zcache/ramster/debug.h
index 17a8435..7b2deaa 100644
--- a/drivers/staging/zcache/ramster/debug.h
+++ b/drivers/staging/zcache/ramster/debug.h
@@ -60,6 +60,59 @@ extern ssize_t ramster_remote_page_flushes_failed;
 
 int ramster_debugfs_init(void);
 
+static inline void inc_ramster_eph_pages_remoted(void)
+{
+	ramster_eph_pages_remoted++;
+};
+static inline void inc_ramster_pers_pages_remoted(void)
+{
+	ramster_pers_pages_remoted++;
+};
+static inline void inc_ramster_eph_pages_remote_failed(void)
+{
+	ramster_eph_pages_remote_failed++;
+};
+static inline void inc_ramster_pers_pages_remote_failed(void)
+{
+	ramster_pers_pages_remote_failed++;
+};
+static inline void inc_ramster_remote_eph_pages_succ_get(void)
+{
+	ramster_remote_eph_pages_succ_get++;
+};
+static inline void inc_ramster_remote_pers_pages_succ_get(void)
+{
+	ramster_remote_pers_pages_succ_get++;
+};
+static inline void inc_ramster_remote_eph_pages_unsucc_get(void)
+{
+	ramster_remote_eph_pages_unsucc_get++;
+};
+static inline void inc_ramster_remote_pers_pages_unsucc_get(void)
+{
+	ramster_remote_pers_pages_unsucc_get++;
+};
+static inline void inc_ramster_pers_pages_remote_nomem(void)
+{
+	ramster_pers_pages_remote_nomem++;
+};
+static inline void inc_ramster_remote_objects_flushed(void)
+{
+	ramster_remote_objects_flushed++;
+};
+static inline void inc_ramster_remote_object_flushes_failed(void)
+{
+	ramster_remote_object_flushes_failed++;
+};
+static inline void inc_ramster_remote_pages_flushed(void)
+{
+	ramster_remote_pages_flushed++;
+};
+static inline void inc_ramster_remote_page_flushes_failed(void)
+{
+	ramster_remote_page_flushes_failed++;
+};
+
 #else
 
 static inline void inc_ramster_flnodes(void) { };
@@ -69,6 +122,20 @@ static inline void dec_ramster_foreign_eph_pages(void) { };
 static inline void inc_ramster_foreign_pers_pages(void) { };
 static inline void dec_ramster_foreign_pers_pages(void) { };
 
+static inline void inc_ramster_eph_pages_remoted(void) { };
+static inline void inc_ramster_pers_pages_remoted(void) { };
+static inline void inc_ramster_eph_pages_remote_failed(void) { };
+static inline void inc_ramster_pers_pages_remote_failed(void) { };
+static inline void inc_ramster_remote_eph_pages_succ_get(void) { };
+static inline void inc_ramster_remote_pers_pages_succ_get(void) { };
+static inline void inc_ramster_remote_eph_pages_unsucc_get(void) { };
+static inline void inc_ramster_remote_pers_pages_unsucc_get(void) { };
+static inline void inc_ramster_pers_pages_remote_nomem(void) { };
+static inline void inc_ramster_remote_objects_flushed(void) { };
+static inline void inc_ramster_remote_object_flushes_failed(void) { };
+static inline void inc_ramster_remote_pages_flushed(void) { };
+static inline void inc_ramster_remote_page_flushes_failed(void) { };
+
 static inline int ramster_debugfs_init(void)
 {
 	return 0;
diff --git a/drivers/staging/zcache/ramster/ramster.c b/drivers/staging/zcache/ramster/ramster.c
index 1d29f5b..8781627 100644
--- a/drivers/staging/zcache/ramster/ramster.c
+++ b/drivers/staging/zcache/ramster/ramster.c
@@ -156,9 +156,9 @@ int ramster_localify(int pool_id, struct tmem_oid *oidp, uint32_t index,
 		pr_err("UNTESTED pampd==NULL in ramster_localify\n");
 #endif
 		if (eph)
-			ramster_remote_eph_pages_unsucc_get++;
+			inc_ramster_remote_eph_pages_unsucc_get();
 		else
-			ramster_remote_pers_pages_unsucc_get++;
+			inc_ramster_remote_pers_pages_unsucc_get();
 		obj = NULL;
 		goto finish;
 	} else if (unlikely(!pampd_is_remote(pampd))) {
@@ -167,9 +167,9 @@ int ramster_localify(int pool_id, struct tmem_oid *oidp, uint32_t index,
 		pr_err("UNTESTED dup while waiting in ramster_localify\n");
 #endif
 		if (eph)
-			ramster_remote_eph_pages_unsucc_get++;
+			inc_ramster_remote_eph_pages_unsucc_get();
 		else
-			ramster_remote_pers_pages_unsucc_get++;
+			inc_ramster_remote_pers_pages_unsucc_get();
 		obj = NULL;
 		pampd = NULL;
 		ret = -EEXIST;
@@ -178,7 +178,7 @@ int ramster_localify(int pool_id, struct tmem_oid *oidp, uint32_t index,
 		/* no remote data, delete the local is_remote pampd */
 		pampd = NULL;
 		if (eph)
-			ramster_remote_eph_pages_unsucc_get++;
+			inc_ramster_remote_eph_pages_unsucc_get();
 		else
 			BUG();
 		delete = true;
@@ -209,9 +209,9 @@ int ramster_localify(int pool_id, struct tmem_oid *oidp, uint32_t index,
 	BUG_ON(extra == NULL);
 	zcache_decompress_to_page(data, size, (struct page *)extra);
 	if (eph)
-		ramster_remote_eph_pages_succ_get++;
+		inc_ramster_remote_eph_pages_succ_get();
 	else
-		ramster_remote_pers_pages_succ_get++;
+		inc_ramster_remote_pers_pages_succ_get();
 	ret = 0;
 finish:
 	tmem_localify_finish(obj, index, pampd, saved_hb, delete);
@@ -296,7 +296,7 @@ void *ramster_pampd_repatriate_preload(void *pampd, struct tmem_pool *pool,
 		c = atomic_dec_return(&ramster_remote_pers_pages);
 		WARN_ON_ONCE(c < 0);
 	} else {
-		ramster_pers_pages_remote_nomem++;
+		inc_ramster_pers_pages_remote_nomem();
 	}
 	local_irq_restore(flags);
 out:
@@ -435,9 +435,9 @@ static void ramster_remote_flush_page(struct flushlist_node *flnode)
 	remotenode = flnode->xh.client_id;
 	ret = r2net_remote_flush(xh, remotenode);
 	if (ret >= 0)
-		ramster_remote_pages_flushed++;
+		inc_ramster_remote_pages_flushed();
 	else
-		ramster_remote_page_flushes_failed++;
+		inc_ramster_remote_page_flushes_failed();
 	preempt_enable_no_resched();
 	ramster_flnode_free(flnode, NULL);
 }
@@ -452,9 +452,9 @@ static void ramster_remote_flush_object(struct flushlist_node *flnode)
 	remotenode = flnode->xh.client_id;
 	ret = r2net_remote_flush_object(xh, remotenode);
 	if (ret >= 0)
-		ramster_remote_objects_flushed++;
+		inc_ramster_remote_objects_flushed();
 	else
-		ramster_remote_object_flushes_failed++;
+		inc_ramster_remote_object_flushes_failed();
 	preempt_enable_no_resched();
 	ramster_flnode_free(flnode, NULL);
 }
@@ -505,18 +505,18 @@ int ramster_remotify_pageframe(bool eph)
 		 * But count them so we know if it becomes a problem.
 		 */
 			if (eph)
-				ramster_eph_pages_remote_failed++;
+				inc_ramster_eph_pages_remote_failed();
 			else
-				ramster_pers_pages_remote_failed++;
+				inc_ramster_pers_pages_remote_failed();
 			break;
 		} else {
 			if (!eph)
 				atomic_inc(&ramster_remote_pers_pages);
 		}
 		if (eph)
-			ramster_eph_pages_remoted++;
+			inc_ramster_eph_pages_remoted();
 		else
-			ramster_pers_pages_remoted++;
+			inc_ramster_pers_pages_remoted();
 		/*
 		 * data was successfully remoted so change the local version to
 		 * point to the remote node where it landed
-- 
1.7.10.4


Thread overview: 10+ messages
2013-04-13  0:36 [PATCH PART3 v3 0/6] staging: zcache/ramster: fix and ramster/debugfs improvement Wanpeng Li
2013-04-13  0:36 ` [PATCH PART3 v3 2/6] staging: ramster: Move debugfs code out of ramster.c file Wanpeng Li
2013-04-13  3:07   ` Greg Kroah-Hartman
2013-04-13 12:52     ` Wanpeng Li
2013-04-13 12:52     ` Wanpeng Li
2013-04-13  0:36 ` [PATCH PART3 v3 3/6] staging: ramster/debug: Use an array to initialize/use debugfs attributes Wanpeng Li
2013-04-13  0:36 ` Wanpeng Li [this message]
2013-04-13  0:36 ` [PATCH PART3 v3 5/6] staging: ramster/debug: Add CONFIG_RAMSTER_DEBUG Kconfig entry Wanpeng Li
2013-04-13  0:36 ` [PATCH PART3 v3 6/6] staging: zcache/debug: fix coding style Wanpeng Li
2013-04-13  0:36 ` [PATCH PART2 v2 7/7] staging: ramster: add how-to for ramster Wanpeng Li
