All of lore.kernel.org
 help / color / mirror / Atom feed
From: Steve Dickson <SteveD@redhat.com>
Cc: Linux NFS Mailing List <nfs@lists.sourceforge.net>
Subject: [PATCH] NFS: Zeroing NFS and kNFSD stats
Date: Tue, 13 Jul 2004 08:24:35 -0400	[thread overview]
Message-ID: <40F3D483.8020507@RedHat.com> (raw)

[-- Attachment #1: Type: text/plain, Size: 1300 bytes --]

Here is a patch I've been using for quite a while now
that will zero out the nfs stats on both the server
and client side.

There are two interfaces: nfsstat and /proc.
The nfsstat interface looks like (from the manpage):
-z     Zeros out all or some of the statistics. Typical uses would be:
     nfsstat -z  - zeros all statistics
     nfsstat -zc - zeros only client statistics
     nfsstat -zs - zeros only server statistics
     nfsstat -zr - zeros only RPC statistics
     nfsstat -zn - zeros only NFS call statistics

Note: the nfs-utils in the Fedora Core 2/3 release already have
this support.

The /proc interface looks something like:

echo nfs > /proc/net/rpc/nfs[d] # zeros calls stats
echo rpc > /proc/net/rpc/nfs[d] # zeros rpc stats
echo net > /proc/net/rpc/nfs[d] # zeros connection stats
echo rc  > /proc/net/rpc/nfsd   # zeros server cache stats
echo fh  > /proc/net/rpc/nfsd   # zeros server fh stats

Again this is a patch I've had kicking around for some time
so it's pretty well tested... and I'm a bit dependent on it
in the sense that it's annoying when I go to zero the stats
and I can't because I forgot to apply the patch...

I'm hopeful that other people will also find this patch useful
and that it will make its way into an upstream kernel
in the very near future....

SteveD.

[-- Attachment #2: linux-2.6.7-nfs-zerostats.patch --]
[-- Type: text/plain, Size: 7711 bytes --]

--- linux-2.6.7/include/linux/sunrpc/stats.h.orig	2004-06-23 11:00:20.000000000 -0400
+++ linux-2.6.7/include/linux/sunrpc/stats.h	2004-06-23 11:32:09.000000000 -0400
@@ -48,21 +48,34 @@ void			rpc_modcount(struct inode *, int)
 #ifdef CONFIG_PROC_FS
 struct proc_dir_entry *	rpc_proc_register(struct rpc_stat *);
 void			rpc_proc_unregister(const char *);
-void			rpc_proc_zero(struct rpc_program *);
 struct proc_dir_entry *	svc_proc_register(struct svc_stat *,
 					  struct file_operations *);
 void			svc_proc_unregister(const char *);
 
 void			svc_seq_show(struct seq_file *,
 				     const struct svc_stat *);
+void			svc_seq_zero(struct seq_file *,
+				     struct svc_stat *, unsigned int);
 
 extern struct proc_dir_entry	*proc_net_rpc;
+/*
+ * Bits used to zero out status
+ */
+#define PRNT_CALLS  0x0001
+#define PRNT_RPC    0x0002
+#define PRNT_NET    0x0004
+#define PRNT_FH     0x0008
+#define PRNT_RC     0x0010
+#define PRNT_IO     0x0020
+#define PRNT_ALL    0xffff
+
+unsigned int rpc_proc_getval(char *, int , const char *, size_t);
 
 #else
 
 static inline struct proc_dir_entry *rpc_proc_register(struct rpc_stat *s) { return NULL; }
 static inline void rpc_proc_unregister(const char *p) {}
-static inline void rpc_proc_zero(struct rpc_program *p) {}
+static inline unsigned int rpc_proc_getval(char *val, int vallen, const char *buf, size_t cnt) { return 0; }
 
 static inline struct proc_dir_entry *svc_proc_register(struct svc_stat *s,
 						       struct file_operations *f) { return NULL; }
@@ -70,6 +83,8 @@ static inline void svc_proc_unregister(c
 
 static inline void svc_seq_show(struct seq_file *seq,
 				const struct svc_stat *st) {}
+static inline void svc_seq_zero(struct seq_file *seq,
+				struct svc_stat *st, unsigned int opt) {}
 
 #define proc_net_rpc NULL
 
--- linux-2.6.7/fs/nfsd/stats.c.orig	2004-06-23 11:00:20.000000000 -0400
+++ linux-2.6.7/fs/nfsd/stats.c	2004-06-23 11:29:31.000000000 -0400
@@ -74,6 +74,48 @@ static int nfsd_proc_show(struct seq_fil
 
 	return 0;
 }
+#define HEX_DIGITS 8
+
+/*
+ * Write handler for /proc/net/rpc/nfsd: zero the server statistics
+ * selected by a user-supplied keyword or hexadecimal PRNT_* mask.
+ */
+static ssize_t
+nfsd_proc_zero(struct file *file, const char *buffer, 
+	size_t count, loff_t *data)
+{
+	struct seq_file *seq = (struct seq_file *)file->private_data;
+	char hexnum [HEX_DIGITS];
+	int opt_prt;
+
+	/* must be signed: a negative errno comes back as a negative int */
+	opt_prt = rpc_proc_getval(hexnum, HEX_DIGITS, buffer, count);
+	if (opt_prt < 0) 
+		return opt_prt;
+
+	if (opt_prt & PRNT_RC) {
+		nfsdstats.rchits = 0;
+		nfsdstats.rcmisses = 0;
+		nfsdstats.rcnocache = 0;
+	}
+	if (opt_prt & PRNT_FH) {
+		nfsdstats.fh_stale = 0;
+		nfsdstats.fh_lookup = 0;
+		nfsdstats.fh_anon = 0;
+		nfsdstats.fh_nocache_dir = 0;
+		nfsdstats.fh_nocache_nondir = 0;
+	}
+	if (opt_prt & PRNT_IO) {
+		nfsdstats.io_read = 0;
+		nfsdstats.io_write = 0;
+	}
+
+	/* zero my rpc info */
+	svc_seq_zero(seq, &nfsd_svcstats, opt_prt);
+
+	/* consume the whole buffer so the writer does not loop on us */
+	return count;
+}
 
 static int nfsd_proc_open(struct inode *inode, struct file *file)
 {
@@ -84,6 +120,7 @@ static struct file_operations nfsd_proc_
 	.owner = THIS_MODULE,
 	.open = nfsd_proc_open,
 	.read  = seq_read,
+	.write  = nfsd_proc_zero,
 	.llseek = seq_lseek,
 	.release = single_release,
 };
--- linux-2.6.7/net/sunrpc/sunrpc_syms.c.orig	2004-06-16 01:19:52.000000000 -0400
+++ linux-2.6.7/net/sunrpc/sunrpc_syms.c	2004-06-23 11:31:35.000000000 -0400
@@ -94,9 +94,11 @@ EXPORT_SYMBOL(auth_domain_lookup);
 #ifdef CONFIG_PROC_FS
 EXPORT_SYMBOL(rpc_proc_register);
 EXPORT_SYMBOL(rpc_proc_unregister);
+EXPORT_SYMBOL_GPL(rpc_proc_getval);
 EXPORT_SYMBOL(svc_proc_register);
 EXPORT_SYMBOL(svc_proc_unregister);
 EXPORT_SYMBOL(svc_seq_show);
+EXPORT_SYMBOL_GPL(svc_seq_zero);
 #endif
 
 /* caching... */
--- linux-2.6.7/net/sunrpc/stats.c.orig	2004-06-23 11:00:20.000000000 -0400
+++ linux-2.6.7/net/sunrpc/stats.c	2004-06-23 11:18:55.000000000 -0400
@@ -21,9 +21,64 @@
 #include <linux/seq_file.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svcsock.h>
+#include <asm/uaccess.h>
 
 #define RPCDBG_FACILITY	RPCDBG_MISC
 
+
+#define HEX_DIGITS 8
+/*
+ * Parse a user-space write into a mask of PRNT_* bits.  The buffer
+ * may hold one of the keywords "nfs", "rpc", "net", "fh" or "rc",
+ * or a hexadecimal bit mask.  A trailing newline, as left behind
+ * by echo(1), is stripped before matching.  Returns the mask, or a
+ * negative errno (callers must store the result in a signed int).
+ */
+unsigned int 
+rpc_proc_getval(char *val, int vallen, const char *buf, size_t cnt)
+{
+	unsigned int new_value = 0;
+	int i;
+
+	if (!cnt)
+		return -EINVAL;
+	/* leave room for the terminating NUL */
+	if (cnt > vallen - 1)
+		cnt = vallen - 1;
+	if (copy_from_user(val, buf, cnt))
+		return -EFAULT;
+	val[cnt] = '\0';
+	if (cnt && val[cnt - 1] == '\n')
+		val[cnt - 1] = '\0';
+
+	if (strcmp(val, "nfs") == 0) {
+		new_value = PRNT_CALLS;
+	} else if (strcmp(val, "rpc") == 0) {
+		new_value = PRNT_RPC;
+	} else if (strcmp(val, "net") == 0) {
+		new_value = PRNT_NET;
+	} else if (strcmp(val, "fh") == 0) {
+		new_value = PRNT_FH;
+	} else if (strcmp(val, "rc") == 0) {
+		new_value = PRNT_RC;
+	} else {
+		for (i = 0; val[i] != '\0'; i++) {
+			unsigned int c = val[i];
+
+			switch (c) {
+			case '0' ... '9': c -= '0'; break;
+			case 'a' ... 'f': c -= 'a'-10; break;
+			case 'A' ... 'F': c -= 'A'-10; break;
+			default:
+				/* reject anything that is not hex */
+				return -EINVAL;
+			}
+			new_value = (new_value << 4) | c;
+		}
+	}
+	return new_value;
+}
+
 struct proc_dir_entry	*proc_net_rpc = NULL;
 
 /*
@@ -60,6 +106,51 @@ static int rpc_proc_show(struct seq_file
 	return 0;
 }
 
+/*
+ * Write handler for /proc/net/rpc/<program>: zero the client-side
+ * statistics selected by a user-supplied keyword or hex mask.
+ */
+static ssize_t
+rpc_proc_zero(struct file *file, const char *buffer, 
+	size_t count, loff_t *data)
+{
+	struct rpc_stat *statp = ((struct seq_file *)file->private_data)->private;
+	struct rpc_program *prog = statp->program;
+	char hexnum [HEX_DIGITS];
+	int opt_prt;
+	int i, j;
+
+	/* must be signed: a negative errno comes back as a negative int */
+	opt_prt = rpc_proc_getval(hexnum, HEX_DIGITS, buffer, count);
+	if (opt_prt < 0) 
+		return opt_prt;
+
+	dprintk("RPC: zeroing bits 0x%x\n", opt_prt);
+
+	if (opt_prt & PRNT_NET) {
+		statp->netcnt = 0;
+		statp->netudpcnt = 0;
+		statp->nettcpcnt = 0;
+		statp->nettcpconn = 0;
+	}
+	if (opt_prt & PRNT_RPC) {
+		statp->rpccnt = 0;
+		statp->rpcretrans = 0;
+		statp->rpcauthrefresh = 0;
+	}
+	if (opt_prt & PRNT_CALLS) {
+		for (i = 0; i < prog->nrvers; i++) {
+			const struct rpc_version *vers = prog->version[i];
+			if (!vers)
+				continue;
+			for (j = 0; j < vers->nrprocs; j++)
+				vers->procs[j].p_count = 0;
+		}
+	}
+	/* consume the whole buffer so the writer does not loop on us */
+	return count;
+}
+
 static int rpc_proc_open(struct inode *inode, struct file *file)
 {
 	return single_open(file, rpc_proc_show, PDE(inode)->data);
@@ -69,6 +154,7 @@ static struct file_operations rpc_proc_f
 	.owner = THIS_MODULE,
 	.open = rpc_proc_open,
 	.read  = seq_read,
+	.write  = rpc_proc_zero,
 	.llseek = seq_lseek,
 	.release = single_release,
 };
@@ -105,6 +191,38 @@ void svc_seq_show(struct seq_file *seq, 
 		seq_putc(seq, '\n');
 	}
 }
+/*
+ * Zero the RPC server stats selected by the PRNT_* bits in opt_prt.
+ */
+void svc_seq_zero(struct seq_file *seq, struct svc_stat *statp, unsigned int opt_prt) {
+	struct svc_program *prog = statp->program;
+	struct svc_procedure *proc;
+	const struct svc_version *vers;
+	int		i, j;
+
+	if (opt_prt & PRNT_NET) {
+		statp->netcnt = 0;
+		statp->netudpcnt = 0;
+		statp->nettcpcnt = 0;
+		statp->nettcpconn = 0;
+	}
+	if (opt_prt & PRNT_RPC) {
+		statp->rpccnt = 0;
+		statp->rpcbadfmt = 0;
+		statp->rpcbadauth = 0;
+		statp->rpcbadclnt = 0;
+	}
+
+	/* only touch the per-procedure call counters when asked to */
+	if (!(opt_prt & PRNT_CALLS))
+		return;
+	for (i = 0; i < prog->pg_nvers; i++) {
+		if (!(vers = prog->pg_vers[i]) || !(proc = vers->vs_proc))
+			continue;
+		for (j = 0; j < vers->vs_nproc; j++, proc++)
+			proc->pc_count = 0;
+	}
+}
 
 /*
  * Register/unregister RPC proc files
@@ -113,11 +229,12 @@ static inline struct proc_dir_entry *
 do_register(const char *name, void *data, struct file_operations *fops)
 {
 	struct proc_dir_entry *ent;
+	mode_t mode;
 
 	rpc_proc_init();
 	dprintk("RPC: registering /proc/net/rpc/%s\n", name);
-
-	ent = create_proc_entry(name, 0, proc_net_rpc);
+	mode = (fops->write != NULL ? 0644 : 0);
+	ent = create_proc_entry(name, mode, proc_net_rpc);
 	if (ent) {
 		ent->proc_fops = fops;
 		ent->data = data;

             reply	other threads:[~2004-07-13 12:24 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2004-07-13 12:24 Steve Dickson [this message]
2004-07-13 14:30 ` [PATCH] NFS: Zeroing NFS and kNFSD stats J. Bruce Fields
2004-07-13 15:08   ` Steve Dickson
2004-07-13 15:17     ` J. Bruce Fields
2004-07-13 18:01       ` J. Bruce Fields
2004-07-13 20:16         ` Steve Dickson
2004-07-14 23:20           ` J. Bruce Fields
2004-07-16  0:39             ` Ben Woodard
2004-07-13 21:09         ` Garrick Staples
2004-07-14 23:24           ` J. Bruce Fields
2004-08-02 10:39       ` Olaf Kirch
2004-08-04 20:14         ` J. Bruce Fields
2004-08-05  5:26           ` Greg Banks

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=40F3D483.8020507@RedHat.com \
    --to=steved@redhat.com \
    --cc=nfs@lists.sourceforge.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.