kernel-janitors.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v4 00/13] staging: lustre: lnet: code cleanups
@ 2015-05-21 19:50 Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations Mike Shuey
                   ` (22 more replies)
  0 siblings, 23 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel

This patch series cleans up code in staging/lustre/lnet - mostly spacing
and dead function cleanup.  Should apply against linux-next 20150518.


^ permalink raw reply	[flat|nested] 54+ messages in thread

* [PATCH v4 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks Mike Shuey
                   ` (21 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

Unify variable declarations to use a single space, and any other obvious
spacing flaws.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/acceptor.c    |   32 ++--
 drivers/staging/lustre/lnet/lnet/api-ni.c      |  198 ++++++++++----------
 drivers/staging/lustre/lnet/lnet/config.c      |  238 ++++++++++++------------
 drivers/staging/lustre/lnet/lnet/lib-eq.c      |   42 ++--
 drivers/staging/lustre/lnet/lnet/lib-md.c      |   26 ++--
 drivers/staging/lustre/lnet/lnet/lib-me.c      |   20 +-
 drivers/staging/lustre/lnet/lnet/lib-move.c    |  240 ++++++++++++------------
 drivers/staging/lustre/lnet/lnet/lib-msg.c     |   50 +++---
 drivers/staging/lustre/lnet/lnet/lib-ptl.c     |  116 ++++++------
 drivers/staging/lustre/lnet/lnet/lo.c          |    2 +-
 drivers/staging/lustre/lnet/lnet/module.c      |    8 +-
 drivers/staging/lustre/lnet/lnet/peer.c        |   50 +++---
 drivers/staging/lustre/lnet/lnet/router.c      |  224 +++++++++++-----------
 drivers/staging/lustre/lnet/lnet/router_proc.c |  210 +++++++++++-----------
 14 files changed, 728 insertions(+), 728 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 72fd1bf..69d4b19 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -143,10 +143,10 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
 	    __u32 local_ip, __u32 peer_ip, int peer_port)
 {
 	lnet_acceptor_connreq_t cr;
-	struct socket	   *sock;
-	int		     rc;
-	int		     port;
-	int		     fatal;
+	struct socket *sock;
+	int rc;
+	int port;
+	int fatal;
 
 	CLASSERT(sizeof(cr) <= 16);	    /* not too big to be on the stack */
 
@@ -211,12 +211,12 @@ static int
 lnet_accept(struct socket *sock, __u32 magic)
 {
 	lnet_acceptor_connreq_t cr;
-	__u32		   peer_ip;
-	int		     peer_port;
-	int		     rc;
-	int		     flip;
-	lnet_ni_t	      *ni;
-	char		   *str;
+	__u32 peer_ip;
+	int peer_port;
+	int rc;
+	int flip;
+	lnet_ni_t *ni;
+	char *str;
 
 	LASSERT(sizeof(cr) <= 16);	     /* not too big for the stack */
 
@@ -333,11 +333,11 @@ static int
 lnet_acceptor(void *arg)
 {
 	struct socket *newsock;
-	int	    rc;
-	__u32	  magic;
-	__u32	  peer_ip;
-	int	    peer_port;
-	int	    secure = (int)((long_ptr_t)arg);
+	int rc;
+	__u32 magic;
+	__u32 peer_ip;
+	int peer_port;
+	int secure = (int)((long_ptr_t)arg);
 
 	LASSERT(lnet_acceptor_state.pta_sock == NULL);
 
@@ -444,7 +444,7 @@ accept2secure(const char *acc, long *sec)
 int
 lnet_acceptor_start(void)
 {
-	int  rc;
+	int rc;
 	long rc2;
 	long secure;
 
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 4a14e51..6910f56 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -41,7 +41,7 @@
 
 #define D_LNI D_CONSOLE
 
-lnet_t      the_lnet;			   /* THE state of the network */
+lnet_t the_lnet;			   /* THE state of the network */
 EXPORT_SYMBOL(the_lnet);
 
 
@@ -70,8 +70,8 @@ lnet_get_routes(void)
 static char *
 lnet_get_networks(void)
 {
-	char   *nets;
-	int     rc;
+	char *nets;
+	int rc;
 
 	if (*networks != 0 && *ip2nets != 0) {
 		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
@@ -107,8 +107,8 @@ lnet_fini_locks(void)
 static int
 lnet_create_remote_nets_table(void)
 {
-	int		i;
-	struct list_head	*hash;
+	int i;
+	struct list_head *hash;
 
 	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
 	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
@@ -273,8 +273,8 @@ static void lnet_assert_wire_constants(void)
 static lnd_t *
 lnet_find_lnd_by_type(int type)
 {
-	lnd_t	      *lnd;
-	struct list_head	 *tmp;
+	lnd_t *lnd;
+	struct list_head *tmp;
 
 	/* holding lnd mutex */
 	list_for_each(tmp, &the_lnet.ln_lnds) {
@@ -325,7 +325,7 @@ void
 lnet_counters_get(lnet_counters_t *counters)
 {
 	lnet_counters_t *ctr;
-	int		i;
+	int i;
 
 	memset(counters, 0, sizeof(*counters));
 
@@ -353,7 +353,7 @@ void
 lnet_counters_reset(void)
 {
 	lnet_counters_t *counters;
-	int		i;
+	int i;
 
 	lnet_net_lock(LNET_LOCK_EX);
 
@@ -396,8 +396,8 @@ lnet_freelist_init(lnet_freelist_t *fl, int n, int size)
 void
 lnet_freelist_fini(lnet_freelist_t *fl)
 {
-	struct list_head       *el;
-	int	       count;
+	struct list_head *el;
+	int count;
 
 	if (fl->fl_nobjs == 0)
 		return;
@@ -441,7 +441,7 @@ lnet_res_type2str(int type)
 static void
 lnet_res_container_cleanup(struct lnet_res_container *rec)
 {
-	int	count = 0;
+	int count = 0;
 
 	if (rec->rec_type == 0) /* not set yet, it's uninitialized */
 		return;
@@ -486,8 +486,8 @@ static int
 lnet_res_container_setup(struct lnet_res_container *rec,
 			 int cpt, int type, int objnum, int objsz)
 {
-	int	rc = 0;
-	int	i;
+	int rc = 0;
+	int i;
 
 	LASSERT(rec->rec_type == 0);
 
@@ -525,8 +525,8 @@ out:
 static void
 lnet_res_containers_destroy(struct lnet_res_container **recs)
 {
-	struct lnet_res_container	*rec;
-	int				i;
+	struct lnet_res_container *rec;
+	int i;
 
 	cfs_percpt_for_each(rec, i, recs)
 		lnet_res_container_cleanup(rec);
@@ -537,10 +537,10 @@ lnet_res_containers_destroy(struct lnet_res_container **recs)
 static struct lnet_res_container **
 lnet_res_containers_create(int type, int objnum, int objsz)
 {
-	struct lnet_res_container	**recs;
-	struct lnet_res_container	*rec;
-	int				rc;
-	int				i;
+	struct lnet_res_container **recs;
+	struct lnet_res_container *rec;
+	int rc;
+	int i;
 
 	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
 	if (recs == NULL) {
@@ -564,9 +564,9 @@ lnet_libhandle_t *
 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
 {
 	/* ALWAYS called with lnet_res_lock held */
-	struct list_head		*head;
-	lnet_libhandle_t	*lh;
-	unsigned int		hash;
+	struct list_head *head;
+	lnet_libhandle_t *lh;
+	unsigned int hash;
 
 	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
 		return NULL;
@@ -586,8 +586,8 @@ void
 lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
 {
 	/* ALWAYS called with lnet_res_lock held */
-	unsigned int	ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
-	unsigned int	hash;
+	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
+	unsigned int hash;
 
 	lh->lh_cookie = rec->rec_lh_cookie;
 	rec->rec_lh_cookie += 1 << ibits;
@@ -605,7 +605,7 @@ lnet_prepare(lnet_pid_t requested_pid)
 {
 	/* Prepare to bring up the network */
 	struct lnet_res_container **recs;
-	int			  rc = 0;
+	int rc = 0;
 
 	LASSERT(the_lnet.ln_refcount == 0);
 
@@ -725,8 +725,8 @@ lnet_unprepare(void)
 lnet_ni_t  *
 lnet_net2ni_locked(__u32 net, int cpt)
 {
-	struct list_head	*tmp;
-	lnet_ni_t	*ni;
+	struct list_head *tmp;
+	lnet_ni_t *ni;
 
 	LASSERT(cpt != LNET_LOCK_EX);
 
@@ -758,8 +758,8 @@ EXPORT_SYMBOL(lnet_net2ni);
 static unsigned int
 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
 {
-	__u64		key = nid;
-	unsigned int	val;
+	__u64 key = nid;
+	unsigned int val;
 
 	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
 
@@ -801,8 +801,8 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid)
 int
 lnet_cpt_of_nid(lnet_nid_t nid)
 {
-	int	cpt;
-	int	cpt2;
+	int cpt;
+	int cpt2;
 
 	if (LNET_CPT_NUMBER == 1)
 		return 0; /* the only one */
@@ -821,8 +821,8 @@ EXPORT_SYMBOL(lnet_cpt_of_nid);
 int
 lnet_islocalnet(__u32 net)
 {
-	struct lnet_ni	*ni;
-	int		cpt;
+	struct lnet_ni *ni;
+	int cpt;
 
 	cpt = lnet_net_lock_current();
 
@@ -838,8 +838,8 @@ lnet_islocalnet(__u32 net)
 lnet_ni_t  *
 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
 {
-	struct lnet_ni	*ni;
-	struct list_head	*tmp;
+	struct lnet_ni *ni;
+	struct list_head *tmp;
 
 	LASSERT(cpt != LNET_LOCK_EX);
 
@@ -858,8 +858,8 @@ lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
 int
 lnet_islocalnid(lnet_nid_t nid)
 {
-	struct lnet_ni	*ni;
-	int		cpt;
+	struct lnet_ni *ni;
+	int cpt;
 
 	cpt = lnet_net_lock_current();
 	ni = lnet_nid2ni_locked(nid, cpt);
@@ -874,10 +874,10 @@ int
 lnet_count_acceptor_nis(void)
 {
 	/* Return the # of NIs that need the acceptor. */
-	int		count = 0;
-	struct list_head	*tmp;
-	struct lnet_ni	*ni;
-	int		cpt;
+	int count = 0;
+	struct list_head *tmp;
+	struct lnet_ni *ni;
+	int cpt;
 
 	cpt = lnet_net_lock_current();
 	list_for_each(tmp, &the_lnet.ln_nis) {
@@ -895,7 +895,7 @@ lnet_count_acceptor_nis(void)
 static int
 lnet_ni_tq_credits(lnet_ni_t *ni)
 {
-	int	credits;
+	int credits;
 
 	LASSERT(ni->ni_ncpts >= 1);
 
@@ -912,9 +912,9 @@ lnet_ni_tq_credits(lnet_ni_t *ni)
 static void
 lnet_shutdown_lndnis(void)
 {
-	int		i;
-	int		islo;
-	lnet_ni_t	 *ni;
+	int i;
+	int islo;
+	lnet_ni_t *ni;
 
 	/* NB called holding the global mutex */
 
@@ -968,8 +968,8 @@ lnet_shutdown_lndnis(void)
 	 * and shut them down in guaranteed thread context */
 	i = 2;
 	while (!list_empty(&the_lnet.ln_nis_zombie)) {
-		int	*ref;
-		int	j;
+		int *ref;
+		int j;
 
 		ni = list_entry(the_lnet.ln_nis_zombie.next,
 				    lnet_ni_t, ni_list);
@@ -1029,15 +1029,15 @@ lnet_shutdown_lndnis(void)
 static int
 lnet_startup_lndnis(void)
 {
-	lnd_t			*lnd;
-	struct lnet_ni		*ni;
-	struct lnet_tx_queue	*tq;
-	struct list_head		nilist;
-	int			i;
-	int		rc = 0;
-	int		lnd_type;
-	int		nicount = 0;
-	char	      *nets = lnet_get_networks();
+	lnd_t *lnd;
+	struct lnet_ni *ni;
+	struct lnet_tx_queue *tq;
+	struct list_head nilist;
+	int i;
+	int rc = 0;
+	int lnd_type;
+	int nicount = 0;
+	char *nets = lnet_get_networks();
 
 	INIT_LIST_HEAD(&nilist);
 
@@ -1181,7 +1181,7 @@ lnet_startup_lndnis(void)
 int
 LNetInit(void)
 {
-	int	rc;
+	int rc;
 
 	lnet_assert_wire_constants();
 	LASSERT(!the_lnet.ln_init);
@@ -1277,8 +1277,8 @@ EXPORT_SYMBOL(LNetFini);
 int
 LNetNIInit(lnet_pid_t requested_pid)
 {
-	int	 im_a_router = 0;
-	int	 rc;
+	int im_a_router = 0;
+	int rc;
 
 	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
 
@@ -1413,9 +1413,9 @@ int
 LNetCtl(unsigned int cmd, void *arg)
 {
 	struct libcfs_ioctl_data *data = arg;
-	lnet_process_id_t	 id = {0};
-	lnet_ni_t		*ni;
-	int		       rc;
+	lnet_process_id_t id = {0};
+	lnet_ni_t *ni;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -1531,10 +1531,10 @@ EXPORT_SYMBOL(LNetCtl);
 int
 LNetGetId(unsigned int index, lnet_process_id_t *id)
 {
-	struct lnet_ni	*ni;
-	struct list_head	*tmp;
-	int		cpt;
-	int		rc = -ENOENT;
+	struct lnet_ni *ni;
+	struct list_head *tmp;
+	int cpt;
+	int rc = -ENOENT;
 
 	LASSERT(the_lnet.ln_init);
 
@@ -1575,11 +1575,11 @@ EXPORT_SYMBOL(LNetSnprintHandle);
 static int
 lnet_create_ping_info(void)
 {
-	int	       i;
-	int	       n;
-	int	       rc;
-	unsigned int      infosz;
-	lnet_ni_t	*ni;
+	int i;
+	int n;
+	int rc;
+	unsigned int infosz;
+	lnet_ni_t *ni;
 	lnet_process_id_t id;
 	lnet_ping_info_t *pinfo;
 
@@ -1633,7 +1633,7 @@ lnet_create_ping_info(void)
 static void
 lnet_destroy_ping_info(void)
 {
-	struct lnet_ni	*ni;
+	struct lnet_ni *ni;
 
 	lnet_net_lock(0);
 
@@ -1654,12 +1654,12 @@ lnet_destroy_ping_info(void)
 int
 lnet_ping_target_init(void)
 {
-	lnet_md_t	 md = { NULL };
-	lnet_handle_me_t  meh;
+	lnet_md_t md = { NULL };
+	lnet_handle_me_t meh;
 	lnet_process_id_t id;
-	int	       rc;
-	int	       rc2;
-	int	       infosz;
+	int rc;
+	int rc2;
+	int infosz;
 
 	rc = lnet_create_ping_info();
 	if (rc != 0)
@@ -1722,11 +1722,11 @@ lnet_ping_target_init(void)
 void
 lnet_ping_target_fini(void)
 {
-	lnet_event_t    event;
-	int	     rc;
-	int	     which;
-	int	     timeout_ms = 1000;
-	sigset_t    blocked = cfs_block_allsigs();
+	lnet_event_t event;
+	int rc;
+	int which;
+	int timeout_ms = 1000;
+	sigset_t blocked = cfs_block_allsigs();
 
 	LNetMDUnlink(the_lnet.ln_ping_target_md);
 	/* NB md could be busy; this just starts the unlink */
@@ -1759,22 +1759,22 @@ lnet_ping_target_fini(void)
 int
 lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
 {
-	lnet_handle_eq_t     eqh;
-	lnet_handle_md_t     mdh;
-	lnet_event_t	 event;
-	lnet_md_t	    md = { NULL };
-	int		  which;
-	int		  unlinked = 0;
-	int		  replied = 0;
-	const int	    a_long_time = 60000; /* mS */
-	int		  infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
-	lnet_ping_info_t    *info;
-	lnet_process_id_t    tmpid;
-	int		  i;
-	int		  nob;
-	int		  rc;
-	int		  rc2;
-	sigset_t	 blocked;
+	lnet_handle_eq_t eqh;
+	lnet_handle_md_t mdh;
+	lnet_event_t event;
+	lnet_md_t md = { NULL };
+	int which;
+	int unlinked = 0;
+	int replied = 0;
+	const int a_long_time = 60000; /* mS */
+	int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+	lnet_ping_info_t *info;
+	lnet_process_id_t tmpid;
+	int i;
+	int nob;
+	int rc;
+	int rc2;
+	sigset_t blocked;
 
 	if (n_ids <= 0 ||
 	    id.nid == LNET_NID_ANY ||
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 2dc4c4a..70bc809 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -38,9 +38,9 @@
 #include "../../include/linux/lnet/lib-lnet.h"
 
 struct lnet_text_buf_t {	    /* tmp struct for parsing routes */
-	struct list_head	 ltb_list;	/* stash on lists */
-	int		ltb_size;	/* allocated size */
-	char	       ltb_text[0];     /* text buffer */
+	struct list_head ltb_list;	/* stash on lists */
+	int ltb_size;	/* allocated size */
+	char ltb_text[0];     /* text buffer */
 };
 
 static int lnet_tbnob;			/* track text buf allocation */
@@ -80,8 +80,8 @@ lnet_issep(char c)
 static int
 lnet_net_unique(__u32 net, struct list_head *nilist)
 {
-	struct list_head       *tmp;
-	lnet_ni_t	*ni;
+	struct list_head *tmp;
+	lnet_ni_t *ni;
 
 	list_for_each(tmp, nilist) {
 		ni = list_entry(tmp, lnet_ni_t, ni_list);
@@ -111,10 +111,10 @@ lnet_ni_free(struct lnet_ni *ni)
 static lnet_ni_t *
 lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
 {
-	struct lnet_tx_queue	*tq;
-	struct lnet_ni		*ni;
-	int			rc;
-	int			i;
+	struct lnet_tx_queue *tq;
+	struct lnet_ni *ni;
+	int rc;
+	int i;
 
 	if (!lnet_net_unique(net, nilist)) {
 		LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n",
@@ -178,13 +178,13 @@ int
 lnet_parse_networks(struct list_head *nilist, char *networks)
 {
 	struct cfs_expr_list *el = NULL;
-	int		tokensize = strlen(networks) + 1;
-	char		*tokens;
-	char		*str;
-	char		*tmp;
-	struct lnet_ni	*ni;
-	__u32		net;
-	int		nnets = 0;
+	int tokensize = strlen(networks) + 1;
+	char *tokens;
+	char *str;
+	char *tmp;
+	struct lnet_ni *ni;
+	__u32 net;
+	int nnets = 0;
 
 	if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
 		/* _WAY_ conservative */
@@ -210,12 +210,12 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
 		goto failed;
 
 	while (str != NULL && *str != 0) {
-		char	*comma = strchr(str, ',');
-		char	*bracket = strchr(str, '(');
-		char	*square = strchr(str, '[');
-		char	*iface;
-		int	niface;
-		int	rc;
+		char *comma = strchr(str, ',');
+		char *bracket = strchr(str, '(');
+		char *square = strchr(str, '[');
+		char *iface;
+		int niface;
+		int rc;
 
 		/* NB we don't check interface conflicts here; it's the LNDs
 		 * responsibility (if it cares at all) */
@@ -369,7 +369,7 @@ static struct lnet_text_buf_t *
 lnet_new_text_buf(int str_len)
 {
 	struct lnet_text_buf_t *ltb;
-	int	      nob;
+	int nob;
 
 	/* NB allocate space for the terminating 0 */
 	nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]);
@@ -404,7 +404,7 @@ lnet_free_text_buf(struct lnet_text_buf_t *ltb)
 static void
 lnet_free_text_bufs(struct list_head *tbs)
 {
-	struct lnet_text_buf_t  *ltb;
+	struct lnet_text_buf_t *ltb;
 
 	while (!list_empty(tbs)) {
 		ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
@@ -417,11 +417,11 @@ lnet_free_text_bufs(struct list_head *tbs)
 static int
 lnet_str2tbs_sep(struct list_head *tbs, char *str)
 {
-	struct list_head	pending;
-	char	     *sep;
-	int	       nob;
-	int	       i;
-	struct lnet_text_buf_t  *ltb;
+	struct list_head pending;
+	char *sep;
+	int nob;
+	int i;
+	struct lnet_text_buf_t *ltb;
 
 	INIT_LIST_HEAD(&pending);
 
@@ -477,8 +477,8 @@ lnet_expand1tb(struct list_head *list,
 	       char *str, char *sep1, char *sep2,
 	       char *item, int itemlen)
 {
-	int	      len1 = (int)(sep1 - str);
-	int	      len2 = strlen(sep2 + 1);
+	int len1 = (int)(sep1 - str);
+	int len2 = strlen(sep2 + 1);
 	struct lnet_text_buf_t *ltb;
 
 	LASSERT(*sep1 == '[');
@@ -500,18 +500,18 @@ lnet_expand1tb(struct list_head *list,
 static int
 lnet_str2tbs_expand(struct list_head *tbs, char *str)
 {
-	char	      num[16];
-	struct list_head	pending;
-	char	     *sep;
-	char	     *sep2;
-	char	     *parsed;
-	char	     *enditem;
-	int	       lo;
-	int	       hi;
-	int	       stride;
-	int	       i;
-	int	       nob;
-	int	       scanned;
+	char num[16];
+	struct list_head pending;
+	char *sep;
+	char *sep2;
+	char *parsed;
+	char *enditem;
+	int lo;
+	int hi;
+	int stride;
+	int i;
+	int nob;
+	int scanned;
 
 	INIT_LIST_HEAD(&pending);
 
@@ -584,8 +584,8 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
 static int
 lnet_parse_hops(char *str, unsigned int *hops)
 {
-	int     len = strlen(str);
-	int     nob = len;
+	int len = strlen(str);
+	int nob = len;
 
 	return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
 		nob == len &&
@@ -597,9 +597,9 @@ lnet_parse_hops(char *str, unsigned int *hops)
 static int
 lnet_parse_priority(char *str, unsigned int *priority, char **token)
 {
-	int   nob;
+	int nob;
 	char *sep;
-	int   len;
+	int len;
 
 	sep = strchr(str, LNET_PRIORITY_SEPARATOR);
 	if (sep == NULL) {
@@ -628,23 +628,23 @@ static int
 lnet_parse_route(char *str, int *im_a_router)
 {
 	/* static scratch buffer OK (single threaded) */
-	static char       cmd[LNET_SINGLE_TEXTBUF_NOB];
-
-	struct list_head	nets;
-	struct list_head	gateways;
-	struct list_head       *tmp1;
-	struct list_head       *tmp2;
-	__u32	     net;
-	lnet_nid_t	nid;
-	struct lnet_text_buf_t  *ltb;
-	int	       rc;
-	char	     *sep;
-	char	     *token = str;
-	int	       ntokens = 0;
-	int	       myrc = -1;
-	unsigned int      hops;
-	int	       got_hops = 0;
-	unsigned int	  priority = 0;
+	static char cmd[LNET_SINGLE_TEXTBUF_NOB];
+
+	struct list_head nets;
+	struct list_head gateways;
+	struct list_head *tmp1;
+	struct list_head *tmp2;
+	__u32 net;
+	lnet_nid_t nid;
+	struct lnet_text_buf_t *ltb;
+	int rc;
+	char *sep;
+	char *token = str;
+	int ntokens = 0;
+	int myrc = -1;
+	unsigned int hops;
+	int got_hops = 0;
+	unsigned int priority = 0;
 
 	INIT_LIST_HEAD(&gateways);
 	INIT_LIST_HEAD(&nets);
@@ -772,7 +772,7 @@ lnet_parse_route(char *str, int *im_a_router)
 static int
 lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
 {
-	struct lnet_text_buf_t   *ltb;
+	struct lnet_text_buf_t *ltb;
 
 	while (!list_empty(tbs)) {
 		ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
@@ -792,8 +792,8 @@ lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
 int
 lnet_parse_routes(char *routes, int *im_a_router)
 {
-	struct list_head	tbs;
-	int	       rc = 0;
+	struct list_head tbs;
+	int rc = 0;
 
 	*im_a_router = 0;
 
@@ -814,8 +814,8 @@ static int
 lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
 {
 	LIST_HEAD(list);
-	int		rc;
-	int		i;
+	int rc;
+	int i;
 
 	rc = cfs_ip_addr_parse(token, len, &list);
 	if (rc != 0)
@@ -834,13 +834,13 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
 {
 	static char tokens[LNET_SINGLE_TEXTBUF_NOB];
 
-	int   matched = 0;
-	int   ntokens = 0;
-	int   len;
+	int matched = 0;
+	int ntokens = 0;
+	int len;
 	char *net = NULL;
 	char *sep;
 	char *token;
-	int   rc;
+	int rc;
 
 	LASSERT(strlen(net_entry) < sizeof(tokens));
 
@@ -889,8 +889,8 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
 static __u32
 lnet_netspec2net(char *netspec)
 {
-	char   *bracket = strchr(netspec, '(');
-	__u32   net;
+	char *bracket = strchr(netspec, '(');
+	__u32 net;
 
 	if (bracket != NULL)
 		*bracket = 0;
@@ -906,15 +906,15 @@ lnet_netspec2net(char *netspec)
 static int
 lnet_splitnets(char *source, struct list_head *nets)
 {
-	int	       offset = 0;
-	int	       offset2;
-	int	       len;
-	struct lnet_text_buf_t  *tb;
-	struct lnet_text_buf_t  *tb2;
-	struct list_head       *t;
-	char	     *sep;
-	char	     *bracket;
-	__u32	     net;
+	int offset = 0;
+	int offset2;
+	int len;
+	struct lnet_text_buf_t *tb;
+	struct lnet_text_buf_t *tb2;
+	struct list_head *t;
+	char *sep;
+	char *bracket;
+	__u32 net;
 
 	LASSERT(!list_empty(nets));
 	LASSERT(nets->next == nets->prev);     /* single entry */
@@ -986,22 +986,22 @@ lnet_splitnets(char *source, struct list_head *nets)
 static int
 lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
 {
-	static char	networks[LNET_SINGLE_TEXTBUF_NOB];
-	static char	source[LNET_SINGLE_TEXTBUF_NOB];
-
-	struct list_head	  raw_entries;
-	struct list_head	  matched_nets;
-	struct list_head	  current_nets;
-	struct list_head	 *t;
-	struct list_head	 *t2;
-	struct lnet_text_buf_t    *tb;
-	struct lnet_text_buf_t    *tb2;
-	__u32	       net1;
-	__u32	       net2;
-	int		 len;
-	int		 count;
-	int		 dup;
-	int		 rc;
+	static char networks[LNET_SINGLE_TEXTBUF_NOB];
+	static char source[LNET_SINGLE_TEXTBUF_NOB];
+
+	struct list_head raw_entries;
+	struct list_head matched_nets;
+	struct list_head current_nets;
+	struct list_head *t;
+	struct list_head *t2;
+	struct lnet_text_buf_t *tb;
+	struct lnet_text_buf_t *tb2;
+	__u32 net1;
+	__u32 net2;
+	int len;
+	int count;
+	int dup;
+	int rc;
 
 	INIT_LIST_HEAD(&raw_entries);
 	if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
@@ -1112,15 +1112,15 @@ lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
 static int
 lnet_ipaddr_enumerate(__u32 **ipaddrsp)
 {
-	int	up;
-	__u32      netmask;
-	__u32     *ipaddrs;
-	__u32     *ipaddrs2;
-	int	nip;
-	char     **ifnames;
-	int	nif = libcfs_ipif_enumerate(&ifnames);
-	int	i;
-	int	rc;
+	int up;
+	__u32 netmask;
+	__u32 *ipaddrs;
+	__u32 *ipaddrs2;
+	int nip;
+	char **ifnames;
+	int nif = libcfs_ipif_enumerate(&ifnames);
+	int i;
+	int rc;
 
 	if (nif <= 0)
 		return nif;
@@ -1178,9 +1178,9 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
 int
 lnet_parse_ip2nets(char **networksp, char *ip2nets)
 {
-	__u32     *ipaddrs = NULL;
-	int	nip = lnet_ipaddr_enumerate(&ipaddrs);
-	int	rc;
+	__u32 *ipaddrs = NULL;
+	int nip = lnet_ipaddr_enumerate(&ipaddrs);
+	int rc;
 
 	if (nip < 0) {
 		LCONSOLE_ERROR_MSG(0x117,
@@ -1215,14 +1215,14 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets)
 int
 lnet_set_ip_niaddr(lnet_ni_t *ni)
 {
-	__u32  net = LNET_NIDNET(ni->ni_nid);
+	__u32 net = LNET_NIDNET(ni->ni_nid);
 	char **names;
-	int    n;
-	__u32  ip;
-	__u32  netmask;
-	int    up;
-	int    i;
-	int    rc;
+	int n;
+	__u32 ip;
+	__u32 netmask;
+	int up;
+	int i;
+	int rc;
 
 	/* Convenience for LNDs that use the IP address of a local interface as
 	 * the local address part of their NID */
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 5470148..368bde9 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -70,7 +70,7 @@ int
 LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
 	    lnet_handle_eq_t *handle)
 {
-	lnet_eq_t     *eq;
+	lnet_eq_t *eq;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -151,13 +151,13 @@ EXPORT_SYMBOL(LNetEQAlloc);
 int
 LNetEQFree(lnet_handle_eq_t eqh)
 {
-	struct lnet_eq	*eq;
-	lnet_event_t	*events = NULL;
-	int		**refs = NULL;
-	int		*ref;
-	int		rc = 0;
-	int		size = 0;
-	int		i;
+	struct lnet_eq *eq;
+	lnet_event_t *events = NULL;
+	int **refs = NULL;
+	int *ref;
+	int rc = 0;
+	int size = 0;
+	int i;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -185,9 +185,9 @@ LNetEQFree(lnet_handle_eq_t eqh)
 	}
 
 	/* stash for free after lock dropped */
-	events	= eq->eq_events;
-	size	= eq->eq_size;
-	refs	= eq->eq_refs;
+	events = eq->eq_events;
+	size = eq->eq_size;
+	refs = eq->eq_refs;
 
 	lnet_res_lh_invalidate(&eq->eq_lh);
 	list_del(&eq->eq_list);
@@ -237,9 +237,9 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
 static int
 lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
 {
-	int		new_index = eq->eq_deq_seq & (eq->eq_size - 1);
-	lnet_event_t	*new_event = &eq->eq_events[new_index];
-	int		rc;
+	int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
+	lnet_event_t *new_event = &eq->eq_events[new_index];
+	int rc;
 
 	/* must called with lnet_eq_wait_lock hold */
 	if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
@@ -323,10 +323,10 @@ static int
 lnet_eq_wait_locked(int *timeout_ms)
 __must_hold(&the_lnet.ln_eq_wait_lock)
 {
-	int		tms = *timeout_ms;
-	int		wait;
-	wait_queue_t  wl;
-	unsigned long      now;
+	int tms = *timeout_ms;
+	int wait;
+	wait_queue_t wl;
+	unsigned long now;
 
 	if (tms == 0)
 		return -1; /* don't want to wait and no new event */
@@ -392,9 +392,9 @@ int
 LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
 	   lnet_event_t *event, int *which)
 {
-	int	wait = 1;
-	int	rc;
-	int	i;
+	int wait = 1;
+	int rc;
+	int i;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 89d660f..5856c30 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -74,7 +74,7 @@ lnet_md_unlink(lnet_libmd_t *md)
 	CDEBUG(D_NET, "Unlinking md %p\n", md);
 
 	if (md->md_eq != NULL) {
-		int	cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
+		int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
 
 		LASSERT(*md->md_eq->eq_refs[cpt] > 0);
 		(*md->md_eq->eq_refs[cpt])--;
@@ -88,9 +88,9 @@ lnet_md_unlink(lnet_libmd_t *md)
 static int
 lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 {
-	int	  i;
+	int i;
 	unsigned int niov;
-	int	  total_length = 0;
+	int total_length = 0;
 
 	lmd->md_me = NULL;
 	lmd->md_start = umd->start;
@@ -268,10 +268,10 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
 {
 	LIST_HEAD(matches);
 	LIST_HEAD(drops);
-	struct lnet_me		*me;
-	struct lnet_libmd	*md;
-	int			cpt;
-	int			rc;
+	struct lnet_me *me;
+	struct lnet_libmd *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -346,9 +346,9 @@ EXPORT_SYMBOL(LNetMDAttach);
 int
 LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
 {
-	lnet_libmd_t	*md;
-	int		cpt;
-	int		rc;
+	lnet_libmd_t *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -421,9 +421,9 @@ EXPORT_SYMBOL(LNetMDBind);
 int
 LNetMDUnlink(lnet_handle_md_t mdh)
 {
-	lnet_event_t	ev;
-	lnet_libmd_t	*md;
-	int		cpt;
+	lnet_event_t ev;
+	lnet_libmd_t *md;
+	int cpt;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index a3f9292..09e9008 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -80,8 +80,8 @@ LNetMEAttach(unsigned int portal,
 	     lnet_handle_me_t *handle)
 {
 	struct lnet_match_table *mtable;
-	struct lnet_me		*me;
-	struct list_head		*head;
+	struct lnet_me *me;
+	struct list_head *head;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -151,10 +151,10 @@ LNetMEInsert(lnet_handle_me_t current_meh,
 	     lnet_unlink_t unlink, lnet_ins_pos_t pos,
 	     lnet_handle_me_t *handle)
 {
-	struct lnet_me		*current_me;
-	struct lnet_me		*new_me;
-	struct lnet_portal	*ptl;
-	int			cpt;
+	struct lnet_me *current_me;
+	struct lnet_me *new_me;
+	struct lnet_portal *ptl;
+	int cpt;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -228,10 +228,10 @@ EXPORT_SYMBOL(LNetMEInsert);
 int
 LNetMEUnlink(lnet_handle_me_t meh)
 {
-	lnet_me_t	*me;
-	lnet_libmd_t	*md;
-	lnet_event_t	ev;
-	int		cpt;
+	lnet_me_t *me;
+	lnet_libmd_t *md;
+	lnet_event_t ev;
+	int cpt;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index c2fb70e..ba59fe7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -49,10 +49,10 @@ MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
 int
 lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
 {
-	lnet_test_peer_t  *tp;
-	struct list_head	*el;
-	struct list_head	*next;
-	struct list_head	 cull;
+	lnet_test_peer_t *tp;
+	struct list_head *el;
+	struct list_head *next;
+	struct list_head cull;
 
 	LASSERT(the_lnet.ln_init);
 
@@ -103,10 +103,10 @@ static int
 fail_peer(lnet_nid_t nid, int outgoing)
 {
 	lnet_test_peer_t *tp;
-	struct list_head       *el;
-	struct list_head       *next;
-	struct list_head	cull;
-	int	       fail = 0;
+	struct list_head *el;
+	struct list_head *next;
+	struct list_head cull;
+	int fail = 0;
 
 	INIT_LIST_HEAD(&cull);
 
@@ -175,7 +175,7 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
 		   unsigned int nob)
 {
 	/* NB diov, siov are READ-ONLY */
-	unsigned int  this_nob;
+	unsigned int this_nob;
 
 	if (nob == 0)
 		return;
@@ -236,8 +236,8 @@ lnet_extract_iov(int dst_niov, struct kvec *dst,
 	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
 	 * for exactly 'len' bytes, and return the number of entries.
 	 * NB not destructive to 'src' */
-	unsigned int    frag_len;
-	unsigned int    niov;
+	unsigned int frag_len;
+	unsigned int niov;
 
 	if (len == 0)			   /* no data => */
 		return 0;		     /* no frags */
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(lnet_extract_iov);
 unsigned int
 lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
 {
-	unsigned int  nob = 0;
+	unsigned int nob = 0;
 
 	while (niov-- > 0)
 		nob += (kiov++)->kiov_len;
@@ -294,9 +294,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
 		    unsigned int nob)
 {
 	/* NB diov, siov are READ-ONLY */
-	unsigned int    this_nob;
-	char	   *daddr = NULL;
-	char	   *saddr = NULL;
+	unsigned int this_nob;
+	char *daddr = NULL;
+	char *saddr = NULL;
 
 	if (nob == 0)
 		return;
@@ -376,8 +376,8 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
 		   unsigned int kiovoffset, unsigned int nob)
 {
 	/* NB iov, kiov are READ-ONLY */
-	unsigned int    this_nob;
-	char	   *addr = NULL;
+	unsigned int this_nob;
+	char *addr = NULL;
 
 	if (nob == 0)
 		return;
@@ -447,8 +447,8 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
 		   unsigned int nob)
 {
 	/* NB kiov, iov are READ-ONLY */
-	unsigned int    this_nob;
-	char	   *addr = NULL;
+	unsigned int this_nob;
+	char *addr = NULL;
 
 	if (nob == 0)
 		return;
@@ -518,8 +518,8 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
 	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
 	 * for exactly 'len' bytes, and return the number of entries.
 	 * NB not destructive to 'src' */
-	unsigned int    frag_len;
-	unsigned int    niov;
+	unsigned int frag_len;
+	unsigned int niov;
 
 	if (len == 0)			   /* no data => */
 		return 0;		     /* no frags */
@@ -565,10 +565,10 @@ static void
 lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 	     unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-	unsigned int  niov = 0;
+	unsigned int niov = 0;
 	struct kvec *iov = NULL;
-	lnet_kiov_t  *kiov = NULL;
-	int	   rc;
+	lnet_kiov_t *kiov = NULL;
+	int rc;
 
 	LASSERT(!in_interrupt());
 	LASSERT(mlen == 0 || msg != NULL);
@@ -642,8 +642,8 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
 static void
 lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	void   *priv = msg->msg_private;
-	int     rc;
+	void *priv = msg->msg_private;
+	int rc;
 
 	LASSERT(!in_interrupt());
 	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
@@ -657,7 +657,7 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	int	rc;
+	int rc;
 
 	LASSERT(!msg->msg_sending);
 	LASSERT(msg->msg_receiving);
@@ -700,7 +700,7 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 static inline int
 lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
 {
-	int	alive;
+	int alive;
 	unsigned long deadline;
 
 	LASSERT(lnet_peer_aliveness_enabled(lp));
@@ -785,10 +785,10 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
 static int
 lnet_post_send_locked(lnet_msg_t *msg, int do_send)
 {
-	lnet_peer_t		*lp = msg->msg_txpeer;
-	lnet_ni_t		*ni = lp->lp_ni;
-	int			cpt = msg->msg_tx_cpt;
-	struct lnet_tx_queue	*tq = ni->ni_tx_queues[cpt];
+	lnet_peer_t *lp = msg->msg_txpeer;
+	lnet_ni_t *ni = lp->lp_ni;
+	int cpt = msg->msg_tx_cpt;
+	struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
 
 	/* non-lnet_send() callers have checked before */
 	LASSERT(!do_send || msg->msg_tx_delayed);
@@ -871,8 +871,8 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
 static lnet_rtrbufpool_t *
 lnet_msg2bufpool(lnet_msg_t *msg)
 {
-	lnet_rtrbufpool_t	*rbp;
-	int			cpt;
+	lnet_rtrbufpool_t *rbp;
+	int cpt;
 
 	LASSERT(msg->msg_rx_committed);
 
@@ -894,9 +894,9 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
 	/* lnet_parse is going to lnet_net_unlock immediately after this, so it
 	 * sets do_recv FALSE and I don't do the unlock/send/lock bit.  I
 	 * return EAGAIN if msg blocked and 0 if received or OK to receive */
-	lnet_peer_t	 *lp = msg->msg_rxpeer;
-	lnet_rtrbufpool_t   *rbp;
-	lnet_rtrbuf_t       *rb;
+	lnet_peer_t *lp = msg->msg_rxpeer;
+	lnet_rtrbufpool_t *rbp;
+	lnet_rtrbuf_t *rb;
 
 	LASSERT(msg->msg_iov == NULL);
 	LASSERT(msg->msg_kiov == NULL);
@@ -967,11 +967,11 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
 void
 lnet_return_tx_credits_locked(lnet_msg_t *msg)
 {
-	lnet_peer_t	*txpeer = msg->msg_txpeer;
-	lnet_msg_t	*msg2;
+	lnet_peer_t *txpeer = msg->msg_txpeer;
+	lnet_msg_t *msg2;
 
 	if (msg->msg_txcredit) {
-		struct lnet_ni	     *ni = txpeer->lp_ni;
+		struct lnet_ni *ni = txpeer->lp_ni;
 		struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
 
 		/* give back NI txcredits */
@@ -1025,12 +1025,12 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
 void
 lnet_return_rx_credits_locked(lnet_msg_t *msg)
 {
-	lnet_peer_t	*rxpeer = msg->msg_rxpeer;
-	lnet_msg_t	*msg2;
+	lnet_peer_t *rxpeer = msg->msg_rxpeer;
+	lnet_msg_t *msg2;
 
 	if (msg->msg_rtrcredit) {
 		/* give back global router credits */
-		lnet_rtrbuf_t     *rb;
+		lnet_rtrbuf_t *rb;
 		lnet_rtrbufpool_t *rbp;
 
 		/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
@@ -1122,13 +1122,13 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
 static lnet_peer_t *
 lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
 {
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*rtr;
-	lnet_route_t		*rtr_best;
-	lnet_route_t		*rtr_last;
-	struct lnet_peer	*lp_best;
-	struct lnet_peer	*lp;
-	int			rc;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *rtr;
+	lnet_route_t *rtr_best;
+	lnet_route_t *rtr_last;
+	struct lnet_peer *lp_best;
+	struct lnet_peer *lp;
+	int rc;
 
 	/* If @rtr_nid is not LNET_NID_ANY, return the gateway with
 	 * rtr_nid nid, otherwise find the best gateway I can use */
@@ -1182,13 +1182,13 @@ lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
 int
 lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
 {
-	lnet_nid_t		dst_nid = msg->msg_target.nid;
-	struct lnet_ni		*src_ni;
-	struct lnet_ni		*local_ni;
-	struct lnet_peer	*lp;
-	int			cpt;
-	int			cpt2;
-	int			rc;
+	lnet_nid_t dst_nid = msg->msg_target.nid;
+	struct lnet_ni *src_ni;
+	struct lnet_ni *local_ni;
+	struct lnet_peer *lp;
+	int cpt;
+	int cpt2;
+	int rc;
 
 	/* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
 	 * but we might want to use pre-determined router for ACK/REPLY
@@ -1364,7 +1364,7 @@ lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
 static void
 lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	lnet_hdr_t	*hdr = &msg->msg_hdr;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
 
 	if (msg->msg_wanted != 0)
 		lnet_setpayloadbuffer(msg);
@@ -1383,9 +1383,9 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	lnet_hdr_t		*hdr = &msg->msg_hdr;
-	struct lnet_match_info	info;
-	int			rc;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
+	struct lnet_match_info info;
+	int rc;
 
 	/* Convert put fields to host byte order */
 	hdr->msg.put.match_bits	= le64_to_cpu(hdr->msg.put.match_bits);
@@ -1433,24 +1433,24 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
 {
-	struct lnet_match_info	info;
-	lnet_hdr_t		*hdr = &msg->msg_hdr;
-	lnet_handle_wire_t	reply_wmd;
-	int			rc;
+	struct lnet_match_info info;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
+	lnet_handle_wire_t reply_wmd;
+	int rc;
 
 	/* Convert get fields to host byte order */
-	hdr->msg.get.match_bits	  = le64_to_cpu(hdr->msg.get.match_bits);
-	hdr->msg.get.ptl_index	  = le32_to_cpu(hdr->msg.get.ptl_index);
-	hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
-	hdr->msg.get.src_offset	  = le32_to_cpu(hdr->msg.get.src_offset);
-
-	info.mi_id.nid	= hdr->src_nid;
-	info.mi_id.pid	= hdr->src_pid;
-	info.mi_opc	= LNET_MD_OP_GET;
-	info.mi_portal	= hdr->msg.get.ptl_index;
-	info.mi_rlength	= hdr->msg.get.sink_length;
-	info.mi_roffset	= hdr->msg.get.src_offset;
-	info.mi_mbits	= hdr->msg.get.match_bits;
+	hdr->msg.get.match_bits  = le64_to_cpu(hdr->msg.get.match_bits);
+	hdr->msg.get.ptl_index   = le32_to_cpu(hdr->msg.get.ptl_index);
+	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
+	hdr->msg.get.src_offset  = le32_to_cpu(hdr->msg.get.src_offset);
+
+	info.mi_id.nid  = hdr->src_nid;
+	info.mi_id.pid  = hdr->src_pid;
+	info.mi_opc     = LNET_MD_OP_GET;
+	info.mi_portal  = hdr->msg.get.ptl_index;
+	info.mi_rlength = hdr->msg.get.sink_length;
+	info.mi_roffset = hdr->msg.get.src_offset;
+	info.mi_mbits   = hdr->msg.get.match_bits;
 
 	rc = lnet_ptl_match_md(&info, msg);
 	if (rc == LNET_MATCHMD_DROP) {
@@ -1497,13 +1497,13 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
 static int
 lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	void	     *private = msg->msg_private;
-	lnet_hdr_t       *hdr = &msg->msg_hdr;
+	void *private = msg->msg_private;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
 	lnet_process_id_t src = {0};
-	lnet_libmd_t     *md;
-	int	       rlength;
-	int	       mlength;
-	int			cpt;
+	lnet_libmd_t *md;
+	int rlength;
+	int mlength;
+	int cpt;
 
 	cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
 	lnet_res_lock(cpt);
@@ -1562,10 +1562,10 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	lnet_hdr_t       *hdr = &msg->msg_hdr;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
 	lnet_process_id_t src = {0};
-	lnet_libmd_t     *md;
-	int			cpt;
+	lnet_libmd_t *md;
+	int cpt;
 
 	src.nid = hdr->src_nid;
 	src.pid = hdr->src_pid;
@@ -1612,7 +1612,7 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	int	rc = 0;
+	int rc = 0;
 
 	if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
 	    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
@@ -1713,15 +1713,15 @@ int
 lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
 	   void *private, int rdma_req)
 {
-	int		rc = 0;
-	int		cpt;
-	int		for_me;
-	struct lnet_msg	*msg;
-	lnet_pid_t     dest_pid;
-	lnet_nid_t     dest_nid;
-	lnet_nid_t     src_nid;
-	__u32	  payload_length;
-	__u32	  type;
+	int rc = 0;
+	int cpt;
+	int for_me;
+	struct lnet_msg *msg;
+	lnet_pid_t dest_pid;
+	lnet_nid_t dest_nid;
+	lnet_nid_t src_nid;
+	__u32 payload_length;
+	__u32 type;
 
 	LASSERT(!in_interrupt());
 
@@ -1945,8 +1945,8 @@ void
 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
 {
 	while (!list_empty(head)) {
-		lnet_process_id_t	id = {0};
-		lnet_msg_t		*msg;
+		lnet_process_id_t id = {0};
+		lnet_msg_t *msg;
 
 		msg = list_entry(head->next, lnet_msg_t, msg_list);
 		list_del(&msg->msg_list);
@@ -1986,8 +1986,8 @@ void
 lnet_recv_delayed_msg_list(struct list_head *head)
 {
 	while (!list_empty(head)) {
-		lnet_msg_t	  *msg;
-		lnet_process_id_t  id;
+		lnet_msg_t *msg;
+		lnet_process_id_t id;
 
 		msg = list_entry(head->next, lnet_msg_t, msg_list);
 		list_del(&msg->msg_list);
@@ -2063,10 +2063,10 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
 	__u64 match_bits, unsigned int offset,
 	__u64 hdr_data)
 {
-	struct lnet_msg		*msg;
-	struct lnet_libmd	*md;
-	int			cpt;
-	int			rc;
+	struct lnet_msg *msg;
+	struct lnet_libmd *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -2153,10 +2153,10 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
 	 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
 	 * lnet_finalize() is called on it, so the LND must call this first */
 
-	struct lnet_msg		*msg = lnet_msg_alloc();
-	struct lnet_libmd	*getmd = getmsg->msg_md;
-	lnet_process_id_t	peer_id = getmsg->msg_target;
-	int			cpt;
+	struct lnet_msg *msg = lnet_msg_alloc();
+	struct lnet_libmd *getmd = getmsg->msg_md;
+	lnet_process_id_t peer_id = getmsg->msg_target;
+	int cpt;
 
 	LASSERT(!getmsg->msg_target_is_router);
 	LASSERT(!getmsg->msg_routing);
@@ -2263,10 +2263,10 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
 	lnet_process_id_t target, unsigned int portal,
 	__u64 match_bits, unsigned int offset)
 {
-	struct lnet_msg		*msg;
-	struct lnet_libmd	*md;
-	int			cpt;
-	int			rc;
+	struct lnet_msg *msg;
+	struct lnet_libmd *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -2353,14 +2353,14 @@ EXPORT_SYMBOL(LNetGet);
 int
 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
 {
-	struct list_head		*e;
-	struct lnet_ni		*ni;
-	lnet_remotenet_t	*rnet;
-	__u32			dstnet = LNET_NIDNET(dstnid);
-	int			hops;
-	int			cpt;
-	__u32			order = 2;
-	struct list_head		*rn_list;
+	struct list_head *e;
+	struct lnet_ni *ni;
+	lnet_remotenet_t *rnet;
+	__u32 dstnet = LNET_NIDNET(dstnid);
+	int hops;
+	int cpt;
+	__u32 order = 2;
+	struct list_head *rn_list;
 
 	/* if !local_nid_dist_zero, I don't return a distance of 0 ever
 	 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index a46ccbf..65d7595 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -60,8 +60,8 @@ lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
 void
 lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
 {
-	lnet_hdr_t	*hdr = &msg->msg_hdr;
-	lnet_event_t	*ev  = &msg->msg_ev;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
+	lnet_event_t *ev  = &msg->msg_ev;
 
 	LASSERT(!msg->msg_routing);
 
@@ -73,7 +73,7 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
 		ev->target.pid    = le32_to_cpu(hdr->dest_pid);
 		ev->initiator.nid = LNET_NID_ANY;
 		ev->initiator.pid = the_lnet.ln_pid;
-		ev->sender	  = LNET_NID_ANY;
+		ev->sender        = LNET_NID_ANY;
 
 	} else {
 		/* event for passive message */
@@ -82,9 +82,9 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
 		ev->initiator.pid = hdr->src_pid;
 		ev->initiator.nid = hdr->src_nid;
 		ev->rlength       = hdr->payload_length;
-		ev->sender	  = msg->msg_from;
-		ev->mlength	  = msg->msg_wanted;
-		ev->offset	  = msg->msg_offset;
+		ev->sender        = msg->msg_from;
+		ev->mlength       = msg->msg_wanted;
+		ev->offset        = msg->msg_offset;
 	}
 
 	switch (ev_type) {
@@ -137,7 +137,7 @@ void
 lnet_msg_commit(lnet_msg_t *msg, int cpt)
 {
 	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
-	lnet_counters_t		  *counters  = the_lnet.ln_counters[cpt];
+	lnet_counters_t *counters  = the_lnet.ln_counters[cpt];
 
 	/* routed message can be committed for both receiving and sending */
 	LASSERT(!msg->msg_tx_committed);
@@ -170,7 +170,7 @@ static void
 lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
 {
 	lnet_counters_t	*counters;
-	lnet_event_t	*ev = &msg->msg_ev;
+	lnet_event_t *ev = &msg->msg_ev;
 
 	LASSERT(msg->msg_tx_committed);
 	if (status != 0)
@@ -219,8 +219,8 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
 static void
 lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
 {
-	lnet_counters_t	*counters;
-	lnet_event_t	*ev = &msg->msg_ev;
+	lnet_counters_t *counters;
+	lnet_event_t *ev = &msg->msg_ev;
 
 	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
 	LASSERT(msg->msg_rx_committed);
@@ -273,7 +273,7 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
 void
 lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status)
 {
-	int	cpt2 = cpt;
+	int cpt2 = cpt;
 
 	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
 	LASSERT(msg->msg_onactivelist);
@@ -335,8 +335,8 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
 void
 lnet_msg_detach_md(lnet_msg_t *msg, int status)
 {
-	lnet_libmd_t	*md = msg->msg_md;
-	int		unlink;
+	lnet_libmd_t *md = msg->msg_md;
+	int unlink;
 
 	/* Now it's safe to drop my caller's ref */
 	md->md_refcount--;
@@ -359,8 +359,8 @@ static int
 lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
 {
 	lnet_handle_wire_t ack_wmd;
-	int		rc;
-	int		status = msg->msg_ev.status;
+	int rc;
+	int status = msg->msg_ev.status;
 
 	LASSERT(msg->msg_onactivelist);
 
@@ -434,11 +434,11 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
 void
 lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
 {
-	struct lnet_msg_container	*container;
-	int				my_slot;
-	int				cpt;
-	int				rc;
-	int				i;
+	struct lnet_msg_container *container;
+	int my_slot;
+	int cpt;
+	int rc;
+	int i;
 
 	LASSERT(!in_interrupt());
 
@@ -534,7 +534,7 @@ EXPORT_SYMBOL(lnet_finalize);
 void
 lnet_msg_container_cleanup(struct lnet_msg_container *container)
 {
-	int     count = 0;
+	int count = 0;
 
 	if (container->msc_init == 0)
 		return;
@@ -568,7 +568,7 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
 int
 lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
 {
-	int	rc;
+	int rc;
 
 	container->msc_init = 1;
 
@@ -608,7 +608,7 @@ void
 lnet_msg_containers_destroy(void)
 {
 	struct lnet_msg_container *container;
-	int     i;
+	int i;
 
 	if (the_lnet.ln_msg_containers == NULL)
 		return;
@@ -624,8 +624,8 @@ int
 lnet_msg_containers_create(void)
 {
 	struct lnet_msg_container *container;
-	int	rc;
-	int	i;
+	int rc;
+	int i;
 
 	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
 						      sizeof(*container));
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 3ba0da9..84707c5 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -39,7 +39,7 @@
 #include "../../include/linux/lnet/lib-lnet.h"
 
 /* NB: add /proc interfaces in upcoming patches */
-int	portal_rotor	= LNET_PTL_ROTOR_HASH_RT;
+int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
 module_param(portal_rotor, int, 0644);
 MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
 
@@ -47,8 +47,8 @@ static int
 lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
 		    __u64 mbits, __u64 ignore_bits)
 {
-	struct lnet_portal	*ptl = the_lnet.ln_portals[index];
-	int			unique;
+	struct lnet_portal *ptl = the_lnet.ln_portals[index];
+	int unique;
 
 	unique = ignore_bits == 0 &&
 		 match_id.nid != LNET_NID_ANY &&
@@ -89,7 +89,7 @@ static void
 lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
 {
 	struct lnet_match_table	*mtable = ptl->ptl_mtables[cpt];
-	int			i;
+	int i;
 
 	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
 	LASSERT(lnet_ptl_is_wildcard(ptl));
@@ -114,7 +114,7 @@ static void
 lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
 {
 	struct lnet_match_table	*mtable = ptl->ptl_mtables[cpt];
-	int			i;
+	int i;
 
 	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
 	LASSERT(lnet_ptl_is_wildcard(ptl));
@@ -141,9 +141,9 @@ lnet_try_match_md(lnet_libmd_t *md,
 {
 	/* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
 	 * lnet_match_blocked_msg() relies on this to avoid races */
-	unsigned int	offset;
-	unsigned int	mlength;
-	lnet_me_t	*me = md->md_me;
+	unsigned int offset;
+	unsigned int mlength;
+	lnet_me_t *me = md->md_me;
 
 	/* MD exhausted */
 	if (lnet_md_exhausted(md))
@@ -227,7 +227,7 @@ struct lnet_match_table *
 lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
 		  __u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos)
 {
-	struct lnet_portal	*ptl;
+	struct lnet_portal *ptl;
 	struct lnet_match_table	*mtable;
 
 	/* NB: called w/o lock */
@@ -261,11 +261,11 @@ static struct lnet_match_table *
 lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
 {
 	struct lnet_match_table	*mtable;
-	struct lnet_portal	*ptl;
-	unsigned int		nmaps;
-	unsigned int		rotor;
-	unsigned int		cpt;
-	bool			routed;
+	struct lnet_portal *ptl;
+	unsigned int nmaps;
+	unsigned int rotor;
+	unsigned int cpt;
+	bool routed;
 
 	/* NB: called w/o lock */
 	LASSERT(info->mi_portal < the_lnet.ln_nportals);
@@ -312,8 +312,8 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
 static int
 lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
 {
-	__u64	*bmap;
-	int	i;
+	__u64 *bmap;
+	int i;
 
 	if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
 		return 0;
@@ -337,7 +337,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
 static void
 lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
 {
-	__u64	*bmap;
+	__u64 *bmap;
 
 	LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
 	LASSERT(pos <= LNET_MT_HASH_IGNORE);
@@ -373,11 +373,11 @@ int
 lnet_mt_match_md(struct lnet_match_table *mtable,
 		 struct lnet_match_info *info, struct lnet_msg *msg)
 {
-	struct list_head		*head;
-	lnet_me_t		*me;
-	lnet_me_t		*tmp;
-	int			exhausted = 0;
-	int			rc;
+	struct list_head *head;
+	lnet_me_t *me;
+	lnet_me_t *tmp;
+	int exhausted = 0;
+	int rc;
 
 	/* any ME with ignore bits? */
 	if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
@@ -428,7 +428,7 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
 static int
 lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
 {
-	int	rc;
+	int rc;
 
 	/* message arrived before any buffer posting on this portal,
 	 * simply delay or drop this message */
@@ -461,9 +461,9 @@ static int
 lnet_ptl_match_delay(struct lnet_portal *ptl,
 		     struct lnet_match_info *info, struct lnet_msg *msg)
 {
-	int	first = ptl->ptl_mt_maps[0]; /* read w/o lock */
-	int	rc = 0;
-	int	i;
+	int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
+	int rc = 0;
+	int i;
 
 	/* steal buffer from other CPTs, and delay it if nothing to steal,
 	 * this function is more expensive than a regular match, but we
@@ -472,7 +472,7 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
 
 	for (i = 0; i < LNET_CPT_NUMBER; i++) {
 		struct lnet_match_table *mtable;
-		int			cpt;
+		int cpt;
 
 		cpt = (first + i) % LNET_CPT_NUMBER;
 		mtable = ptl->ptl_mtables[cpt];
@@ -536,8 +536,8 @@ int
 lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
 {
 	struct lnet_match_table	*mtable;
-	struct lnet_portal	*ptl;
-	int			rc;
+	struct lnet_portal *ptl;
+	int rc;
 
 	CDEBUG(D_NET, "Request from %s of length %d into portal %d MB=%#llx\n",
 	       libcfs_id2str(info->mi_id), info->mi_rlength, info->mi_portal,
@@ -622,13 +622,13 @@ void
 lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 		   struct list_head *matches, struct list_head *drops)
 {
-	struct lnet_portal	*ptl = the_lnet.ln_portals[me->me_portal];
+	struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
 	struct lnet_match_table	*mtable;
-	struct list_head		*head;
-	lnet_msg_t		*tmp;
-	lnet_msg_t		*msg;
-	int			exhausted = 0;
-	int			cpt;
+	struct list_head *head;
+	lnet_msg_t *tmp;
+	lnet_msg_t *msg;
+	int exhausted = 0;
+	int cpt;
 
 	LASSERT(md->md_refcount == 0); /* a brand new MD */
 
@@ -647,20 +647,20 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 	head = &ptl->ptl_msg_stealing;
  again:
 	list_for_each_entry_safe(msg, tmp, head, msg_list) {
-		struct lnet_match_info	info;
-		lnet_hdr_t		*hdr;
-		int			rc;
+		struct lnet_match_info info;
+		lnet_hdr_t *hdr;
+		int rc;
 
 		LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
 
-		hdr   = &msg->msg_hdr;
-		info.mi_id.nid	= hdr->src_nid;
-		info.mi_id.pid	= hdr->src_pid;
-		info.mi_opc	= LNET_MD_OP_PUT;
-		info.mi_portal	= hdr->msg.put.ptl_index;
-		info.mi_rlength	= hdr->payload_length;
-		info.mi_roffset	= hdr->msg.put.offset;
-		info.mi_mbits	= hdr->msg.put.match_bits;
+		hdr = &msg->msg_hdr;
+		info.mi_id.nid  = hdr->src_nid;
+		info.mi_id.pid  = hdr->src_pid;
+		info.mi_opc     = LNET_MD_OP_PUT;
+		info.mi_portal  = hdr->msg.put.ptl_index;
+		info.mi_rlength = hdr->payload_length;
+		info.mi_roffset = hdr->msg.put.offset;
+		info.mi_mbits   = hdr->msg.put.match_bits;
 
 		rc = lnet_try_match_md(md, &info, msg);
 
@@ -715,7 +715,7 @@ static void
 lnet_ptl_cleanup(struct lnet_portal *ptl)
 {
 	struct lnet_match_table	*mtable;
-	int			i;
+	int i;
 
 	if (ptl->ptl_mtables == NULL) /* uninitialized portal */
 		return;
@@ -723,9 +723,9 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
 	LASSERT(list_empty(&ptl->ptl_msg_delayed));
 	LASSERT(list_empty(&ptl->ptl_msg_stealing));
 	cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
-		struct list_head	*mhash;
-		lnet_me_t	*me;
-		int		j;
+		struct list_head *mhash;
+		lnet_me_t *me;
+		int j;
 
 		if (mtable->mt_mhash == NULL) /* uninitialized match-table */
 			continue;
@@ -753,9 +753,9 @@ static int
 lnet_ptl_setup(struct lnet_portal *ptl, int index)
 {
 	struct lnet_match_table	*mtable;
-	struct list_head		*mhash;
-	int			i;
-	int			j;
+	struct list_head *mhash;
+	int i;
+	int j;
 
 	ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
 					    sizeof(struct lnet_match_table));
@@ -798,7 +798,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
 void
 lnet_portals_destroy(void)
 {
-	int	i;
+	int i;
 
 	if (the_lnet.ln_portals == NULL)
 		return;
@@ -813,8 +813,8 @@ lnet_portals_destroy(void)
 int
 lnet_portals_create(void)
 {
-	int	size;
-	int	i;
+	int size;
+	int i;
 
 	size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]);
 
@@ -898,8 +898,8 @@ EXPORT_SYMBOL(LNetSetLazyPortal);
 int
 LNetClearLazyPortal(int portal)
 {
-	struct lnet_portal	*ptl;
-	LIST_HEAD		(zombies);
+	struct lnet_portal *ptl;
+	LIST_HEAD(zombies);
 
 	if (portal < 0 || portal >= the_lnet.ln_nportals)
 		return -EINVAL;
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index f708c2e..2a137f4 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -111,7 +111,7 @@ lnd_t the_lolnd = {
 	/* .lnd_type       = */ LOLND,
 	/* .lnd_startup    = */ lolnd_startup,
 	/* .lnd_shutdown   = */ lolnd_shutdown,
-	/* .lnt_ctl	= */ NULL,
+	/* .lnt_ctl        = */ NULL,
 	/* .lnd_send       = */ lolnd_send,
 	/* .lnd_recv       = */ lolnd_recv,
 	/* .lnd_eager_recv = */ NULL,
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 72b7fbc..ff3f831 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -47,7 +47,7 @@ static int
 lnet_configure(void *arg)
 {
 	/* 'arg' only there so I can be passed to cfs_create_thread() */
-	int    rc = 0;
+	int rc = 0;
 
 	LNET_MUTEX_LOCK(&lnet_config_mutex);
 
@@ -66,7 +66,7 @@ lnet_configure(void *arg)
 static int
 lnet_unconfigure(void)
 {
-	int   refcount;
+	int refcount;
 
 	LNET_MUTEX_LOCK(&lnet_config_mutex);
 
@@ -86,7 +86,7 @@ lnet_unconfigure(void)
 static int
 lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
 {
-	int   rc;
+	int rc;
 
 	switch (cmd) {
 	case IOC_LIBCFS_CONFIGURE:
@@ -113,7 +113,7 @@ static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
 static int __init
 init_lnet(void)
 {
-	int		  rc;
+	int rc;
 
 	mutex_init(&lnet_config_mutex);
 
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 45b5742..1fceed3 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -43,10 +43,10 @@
 int
 lnet_peer_tables_create(void)
 {
-	struct lnet_peer_table	*ptable;
-	struct list_head		*hash;
-	int			i;
-	int			j;
+	struct lnet_peer_table *ptable;
+	struct list_head *hash;
+	int i;
+	int j;
 
 	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
 						   sizeof(*ptable));
@@ -77,10 +77,10 @@ lnet_peer_tables_create(void)
 void
 lnet_peer_tables_destroy(void)
 {
-	struct lnet_peer_table	*ptable;
-	struct list_head		*hash;
-	int			i;
-	int			j;
+	struct lnet_peer_table *ptable;
+	struct list_head *hash;
+	int i;
+	int j;
 
 	if (the_lnet.ln_peer_tables == NULL)
 		return;
@@ -106,9 +106,9 @@ lnet_peer_tables_destroy(void)
 void
 lnet_peer_tables_cleanup(void)
 {
-	struct lnet_peer_table	*ptable;
-	int			i;
-	int			j;
+	struct lnet_peer_table *ptable;
+	int i;
+	int j;
 
 	LASSERT(the_lnet.ln_shutdown);	/* i.e. no new peers */
 
@@ -133,7 +133,7 @@ lnet_peer_tables_cleanup(void)
 
 	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
 		LIST_HEAD(deathrow);
-		lnet_peer_t	*lp;
+		lnet_peer_t *lp;
 
 		lnet_net_lock(i);
 
@@ -186,8 +186,8 @@ lnet_destroy_peer_locked(lnet_peer_t *lp)
 lnet_peer_t *
 lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
 {
-	struct list_head	*peers;
-	lnet_peer_t	*lp;
+	struct list_head *peers;
+	lnet_peer_t *lp;
 
 	LASSERT(!the_lnet.ln_shutdown);
 
@@ -205,11 +205,11 @@ lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
 int
 lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
 {
-	struct lnet_peer_table	*ptable;
-	lnet_peer_t		*lp = NULL;
-	lnet_peer_t		*lp2;
-	int			cpt2;
-	int			rc = 0;
+	struct lnet_peer_table *ptable;
+	lnet_peer_t *lp = NULL;
+	lnet_peer_t *lp2;
+	int cpt2;
+	int rc = 0;
 
 	*lpp = NULL;
 	if (the_lnet.ln_shutdown) /* it's shutting down */
@@ -287,8 +287,8 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
 		goto out;
 	}
 
-	lp->lp_txcredits    =
-	lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
+	lp->lp_txcredits     =
+	lp->lp_mintxcredits  = lp->lp_ni->ni_peertxcredits;
 	lp->lp_rtrcredits     =
 	lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
 
@@ -308,10 +308,10 @@ out:
 void
 lnet_debug_peer(lnet_nid_t nid)
 {
-	char		*aliveness = "NA";
-	lnet_peer_t	*lp;
-	int		rc;
-	int		cpt;
+	char *aliveness = "NA";
+	lnet_peer_t *lp;
+	int rc;
+	int cpt;
 
 	cpt = lnet_cpt_of_nid(nid);
 	lnet_net_lock(cpt);
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 8510bae..96886a2 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -139,8 +139,8 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
 static void
 lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 {
-	int	alive;
-	int	notifylnd;
+	int alive;
+	int notifylnd;
 
 	/* Notify only in 1 thread at any time to ensure ordered notification.
 	 * NB individual events can be missed; the only guarantee is that you
@@ -152,7 +152,7 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 	lp->lp_notifying = 1;
 
 	while (lp->lp_notify) {
-		alive     = lp->lp_alive;
+		alive = lp->lp_alive;
 		notifylnd = lp->lp_notifylnd;
 
 		lp->lp_notifylnd = 0;
@@ -228,9 +228,9 @@ lnet_rtr_decref_locked(lnet_peer_t *lp)
 lnet_remotenet_t *
 lnet_find_net_locked(__u32 net)
 {
-	lnet_remotenet_t	*rnet;
-	struct list_head		*tmp;
-	struct list_head		*rn_list;
+	lnet_remotenet_t *rnet;
+	struct list_head *tmp;
+	struct list_head *rn_list;
 
 	LASSERT(!the_lnet.ln_shutdown);
 
@@ -276,9 +276,9 @@ static void lnet_shuffle_seed(void)
 static void
 lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
 {
-	unsigned int      len = 0;
-	unsigned int      offset = 0;
-	struct list_head       *e;
+	unsigned int len = 0;
+	unsigned int offset = 0;
+	struct list_head *e;
 
 	lnet_shuffle_seed();
 
@@ -304,13 +304,13 @@ int
 lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
 	       unsigned int priority)
 {
-	struct list_head	  *e;
-	lnet_remotenet_t    *rnet;
-	lnet_remotenet_t    *rnet2;
-	lnet_route_t	*route;
-	lnet_ni_t	   *ni;
-	int		  add_route;
-	int		  rc;
+	struct list_head *e;
+	lnet_remotenet_t *rnet;
+	lnet_remotenet_t *rnet2;
+	lnet_route_t *route;
+	lnet_ni_t *ni;
+	int add_route;
+	int rc;
 
 	CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
 	       libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
@@ -416,14 +416,14 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
 int
 lnet_check_routes(void)
 {
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*route;
-	lnet_route_t		*route2;
-	struct list_head		*e1;
-	struct list_head		*e2;
-	int			cpt;
-	struct list_head		*rn_list;
-	int			i;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *route;
+	lnet_route_t *route2;
+	struct list_head *e1;
+	struct list_head *e2;
+	int cpt;
+	struct list_head *rn_list;
+	int i;
 
 	cpt = lnet_net_lock_current();
 
@@ -434,9 +434,9 @@ lnet_check_routes(void)
 
 			route2 = NULL;
 			list_for_each(e2, &rnet->lrn_routes) {
-				lnet_nid_t	nid1;
-				lnet_nid_t	nid2;
-				int		net;
+				lnet_nid_t nid1;
+				lnet_nid_t nid2;
+				int net;
 
 				route = list_entry(e2, lnet_route_t,
 						       lr_list);
@@ -472,14 +472,14 @@ lnet_check_routes(void)
 int
 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
 {
-	struct lnet_peer	*gateway;
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*route;
-	struct list_head		*e1;
-	struct list_head		*e2;
-	int			rc = -ENOENT;
-	struct list_head		*rn_list;
-	int			idx = 0;
+	struct lnet_peer *gateway;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *route;
+	struct list_head *e1;
+	struct list_head *e2;
+	int rc = -ENOENT;
+	struct list_head *rn_list;
+	int idx = 0;
 
 	CDEBUG(D_NET, "Del route: net %s : gw %s\n",
 	       libcfs_net2str(net), libcfs_nid2str(gw_nid));
@@ -554,13 +554,13 @@ int
 lnet_get_route(int idx, __u32 *net, __u32 *hops,
 	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
 {
-	struct list_head		*e1;
-	struct list_head		*e2;
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*route;
-	int			cpt;
-	int			i;
-	struct list_head		*rn_list;
+	struct list_head *e1;
+	struct list_head *e2;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *route;
+	int cpt;
+	int i;
+	struct list_head *rn_list;
 
 	cpt = lnet_net_lock_current();
 
@@ -574,11 +574,11 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
 						       lr_list);
 
 				if (idx-- == 0) {
-					*net	  = rnet->lrn_net;
-					*hops	  = route->lr_hops;
+					*net      = rnet->lrn_net;
+					*hops     = route->lr_hops;
 					*priority = route->lr_priority;
 					*gateway  = route->lr_gateway->lp_nid;
-					*alive	  = route->lr_gateway->lp_alive;
+					*alive    = route->lr_gateway->lp_alive;
 					lnet_net_unlock(cpt);
 					return 0;
 				}
@@ -593,7 +593,7 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
 void
 lnet_swap_pinginfo(lnet_ping_info_t *info)
 {
-	int	       i;
+	int i;
 	lnet_ni_status_t *stat;
 
 	__swab32s(&info->pi_magic);
@@ -614,9 +614,9 @@ lnet_swap_pinginfo(lnet_ping_info_t *info)
 static void
 lnet_parse_rc_info(lnet_rc_data_t *rcd)
 {
-	lnet_ping_info_t	*info = rcd->rcd_pinginfo;
-	struct lnet_peer	*gw   = rcd->rcd_gateway;
-	lnet_route_t		*rtr;
+	lnet_ping_info_t *info = rcd->rcd_pinginfo;
+	struct lnet_peer *gw = rcd->rcd_gateway;
+	lnet_route_t *rtr;
 
 	if (!gw->lp_alive)
 		return;
@@ -643,14 +643,14 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
 		return; /* can't carry NI status info */
 
 	list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
-		int	ptl_status = LNET_NI_STATUS_INVALID;
-		int	down = 0;
-		int	up = 0;
-		int	i;
+		int ptl_status = LNET_NI_STATUS_INVALID;
+		int down = 0;
+		int up = 0;
+		int i;
 
 		for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
 			lnet_ni_status_t *stat = &info->pi_ni[i];
-			lnet_nid_t	 nid = stat->ns_nid;
+			lnet_nid_t nid = stat->ns_nid;
 
 			if (nid == LNET_NID_ANY) {
 				CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
@@ -699,8 +699,8 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
 static void
 lnet_router_checker_event(lnet_event_t *event)
 {
-	lnet_rc_data_t		*rcd = event->md.user_ptr;
-	struct lnet_peer	*lp;
+	lnet_rc_data_t *rcd = event->md.user_ptr;
+	struct lnet_peer *lp;
 
 	LASSERT(rcd != NULL);
 
@@ -752,14 +752,14 @@ lnet_router_checker_event(lnet_event_t *event)
 static void
 lnet_wait_known_routerstate(void)
 {
-	lnet_peer_t	 *rtr;
-	struct list_head	  *entry;
-	int		  all_known;
+	lnet_peer_t *rtr;
+	struct list_head *entry;
+	int all_known;
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 
 	for (;;) {
-		int	cpt = lnet_net_lock_current();
+		int cpt = lnet_net_lock_current();
 
 		all_known = 1;
 		list_for_each(entry, &the_lnet.ln_routers) {
@@ -799,9 +799,9 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
 static void
 lnet_update_ni_status_locked(void)
 {
-	lnet_ni_t	*ni;
-	long		now;
-	int		timeout;
+	lnet_ni_t *ni;
+	long now;
+	int timeout;
 
 	LASSERT(the_lnet.ln_routing);
 
@@ -860,10 +860,10 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd)
 static lnet_rc_data_t *
 lnet_create_rc_data_locked(lnet_peer_t *gateway)
 {
-	lnet_rc_data_t		*rcd = NULL;
-	lnet_ping_info_t	*pi;
-	int			rc;
-	int			i;
+	lnet_rc_data_t *rcd = NULL;
+	lnet_ping_info_t *pi;
+	int rc;
+	int i;
 
 	lnet_net_unlock(gateway->lp_cpt);
 
@@ -943,8 +943,8 @@ static void
 lnet_ping_router_locked(lnet_peer_t *rtr)
 {
 	lnet_rc_data_t *rcd = NULL;
-	unsigned long      now = cfs_time_current();
-	int	     secs;
+	unsigned long now = cfs_time_current();
+	int secs;
 
 	lnet_peer_addref_locked(rtr);
 
@@ -979,9 +979,9 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
 	if (secs != 0 && !rtr->lp_ping_notsent &&
 	    cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
 					     cfs_time_seconds(secs)))) {
-		int	       rc;
+		int rc;
 		lnet_process_id_t id;
-		lnet_handle_md_t  mdh;
+		lnet_handle_md_t mdh;
 
 		id.nid = rtr->lp_nid;
 		id.pid = LUSTRE_SRV_LNET_PID;
@@ -1013,8 +1013,8 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
 int
 lnet_router_checker_start(void)
 {
-	int	  rc;
-	int	  eqsz;
+	int rc;
+	int eqsz;
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
 
@@ -1085,11 +1085,11 @@ lnet_router_checker_stop(void)
 static void
 lnet_prune_rc_data(int wait_unlink)
 {
-	lnet_rc_data_t		*rcd;
-	lnet_rc_data_t		*tmp;
-	lnet_peer_t		*lp;
-	struct list_head		head;
-	int			i = 2;
+	lnet_rc_data_t *rcd;
+	lnet_rc_data_t *tmp;
+	lnet_peer_t *lp;
+	struct list_head head;
+	int i = 2;
 
 	if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
 		   list_empty(&the_lnet.ln_rcd_deathrow) &&
@@ -1169,17 +1169,17 @@ lnet_prune_rc_data(int wait_unlink)
 static int
 lnet_router_checker(void *arg)
 {
-	lnet_peer_t       *rtr;
-	struct list_head	*entry;
+	lnet_peer_t *rtr;
+	struct list_head *entry;
 
 	cfs_block_allsigs();
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 
 	while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
-		__u64	version;
-		int	cpt;
-		int	cpt2;
+		__u64 version;
+		int cpt;
+		int cpt2;
 
 		cpt = lnet_net_lock_current();
 rescan:
@@ -1245,11 +1245,11 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
 static lnet_rtrbuf_t *
 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 {
-	int	    npages = rbp->rbp_npages;
-	int	    sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
-	struct page   *page;
+	int npages = rbp->rbp_npages;
+	int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
+	struct page *page;
 	lnet_rtrbuf_t *rb;
-	int	    i;
+	int i;
 
 	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
 	if (rb == NULL)
@@ -1280,9 +1280,9 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 static void
 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
 {
-	int		npages = rbp->rbp_npages;
-	int		nbuffers = 0;
-	lnet_rtrbuf_t	*rb;
+	int npages = rbp->rbp_npages;
+	int nbuffers = 0;
+	lnet_rtrbuf_t *rb;
 
 	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
 		return;
@@ -1310,7 +1310,7 @@ static int
 lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
 {
 	lnet_rtrbuf_t *rb;
-	int	    i;
+	int i;
 
 	if (rbp->rbp_nbuffers != 0) {
 		LASSERT(rbp->rbp_nbuffers == nbufs);
@@ -1355,7 +1355,7 @@ void
 lnet_rtrpools_free(void)
 {
 	lnet_rtrbufpool_t *rtrp;
-	int		  i;
+	int i;
 
 	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
 		return;
@@ -1373,7 +1373,7 @@ lnet_rtrpools_free(void)
 static int
 lnet_nrb_tiny_calculate(int npages)
 {
-	int	nrbs = LNET_NRB_TINY;
+	int nrbs = LNET_NRB_TINY;
 
 	if (tiny_router_buffers < 0) {
 		LCONSOLE_ERROR_MSG(0x10c,
@@ -1392,7 +1392,7 @@ lnet_nrb_tiny_calculate(int npages)
 static int
 lnet_nrb_small_calculate(int npages)
 {
-	int	nrbs = LNET_NRB_SMALL;
+	int nrbs = LNET_NRB_SMALL;
 
 	if (small_router_buffers < 0) {
 		LCONSOLE_ERROR_MSG(0x10c,
@@ -1411,7 +1411,7 @@ lnet_nrb_small_calculate(int npages)
 static int
 lnet_nrb_large_calculate(int npages)
 {
-	int	nrbs = LNET_NRB_LARGE;
+	int nrbs = LNET_NRB_LARGE;
 
 	if (large_router_buffers < 0) {
 		LCONSOLE_ERROR_MSG(0x10c,
@@ -1431,13 +1431,13 @@ int
 lnet_rtrpools_alloc(int im_a_router)
 {
 	lnet_rtrbufpool_t *rtrp;
-	int	large_pages;
-	int	small_pages = 1;
-	int	nrb_tiny;
-	int	nrb_small;
-	int	nrb_large;
-	int	rc;
-	int	i;
+	int large_pages;
+	int small_pages = 1;
+	int nrb_tiny;
+	int nrb_small;
+	int nrb_large;
+	int rc;
+	int i;
 
 	large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
@@ -1507,9 +1507,9 @@ lnet_rtrpools_alloc(int im_a_router)
 int
 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
 {
-	struct lnet_peer	*lp = NULL;
-	unsigned long		now = cfs_time_current();
-	int			cpt = lnet_cpt_of_nid(nid);
+	struct lnet_peer *lp = NULL;
+	unsigned long now = cfs_time_current();
+	int cpt = lnet_cpt_of_nid(nid);
 
 	LASSERT(!in_interrupt ());
 
@@ -1591,13 +1591,13 @@ void
 lnet_router_checker(void)
 {
 	static time_t last;
-	static int    running;
+	static int running;
 
-	time_t	    now = get_seconds();
-	int	       interval = now - last;
-	int	       rc;
-	__u64	     version;
-	lnet_peer_t      *rtr;
+	time_t now = get_seconds();
+	int interval = now - last;
+	int rc;
+	__u64 version;
+	lnet_peer_t *rtr;
 
 	/* It's no use to call me again within a sec - all intervals and
 	 * timeouts are measured in seconds */
@@ -1625,7 +1625,7 @@ lnet_router_checker(void)
 
 	/* consume all pending events */
 	while (1) {
-		int	  i;
+		int i;
 		lnet_event_t ev;
 
 		/* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index c055afc..ee902dc 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -112,11 +112,11 @@ static int proc_call_handler(void *data, int write, loff_t *ppos,
 static int __proc_lnet_stats(void *data, int write,
 			     loff_t pos, void __user *buffer, int nob)
 {
-	int	      rc;
+	int rc;
 	lnet_counters_t *ctrs;
-	int	      len;
-	char	    *tmpstr;
-	const int	tmpsiz = 256; /* 7 %u and 4 %llu */
+	int len;
+	char *tmpstr;
+	const int tmpsiz = 256; /* 7 %u and 4 %llu */
 
 	if (write) {
 		lnet_counters_reset();
@@ -167,13 +167,13 @@ static int proc_lnet_stats(struct ctl_table *table, int write,
 static int proc_lnet_routes(struct ctl_table *table, int write,
 			    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	const int	tmpsiz = 256;
-	char		*tmpstr;
-	char		*s;
-	int		rc = 0;
-	int		len;
-	int		ver;
-	int		off;
+	const int tmpsiz = 256;
+	char *tmpstr;
+	char *s;
+	int rc = 0;
+	int len;
+	int ver;
+	int off;
 
 	CLASSERT(sizeof(loff_t) >= 4);
 
@@ -205,13 +205,13 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
 		lnet_net_unlock(0);
 		*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
 	} else {
-		struct list_head		*n;
-		struct list_head		*r;
-		lnet_route_t		*route = NULL;
-		lnet_remotenet_t	*rnet  = NULL;
-		int			skip  = off - 1;
-		struct list_head		*rn_list;
-		int			i;
+		struct list_head *n;
+		struct list_head *r;
+		lnet_route_t *route = NULL;
+		lnet_remotenet_t *rnet  = NULL;
+		int skip  = off - 1;
+		struct list_head *rn_list;
+		int i;
 
 		lnet_net_lock(0);
 
@@ -251,11 +251,11 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
 		}
 
 		if (route != NULL) {
-			__u32        net	= rnet->lrn_net;
-			unsigned int hops	= route->lr_hops;
-			unsigned int priority	= route->lr_priority;
-			lnet_nid_t   nid	= route->lr_gateway->lp_nid;
-			int          alive	= route->lr_gateway->lp_alive;
+			__u32 net = rnet->lrn_net;
+			unsigned int hops = route->lr_hops;
+			unsigned int priority = route->lr_priority;
+			lnet_nid_t nid = route->lr_gateway->lp_nid;
+			int alive = route->lr_gateway->lp_alive;
 
 			s += snprintf(s, tmpstr + tmpsiz - s,
 				      "%-8s %4u %8u %7s %s\n",
@@ -293,13 +293,13 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
 static int proc_lnet_routers(struct ctl_table *table, int write,
 			     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int	rc = 0;
-	char      *tmpstr;
-	char      *s;
-	const int  tmpsiz = 256;
-	int	len;
-	int	ver;
-	int	off;
+	int rc = 0;
+	char *tmpstr;
+	char *s;
+	const int tmpsiz = 256;
+	int len;
+	int ver;
+	int off;
 
 	off = LNET_PROC_HOFF_GET(*ppos);
 	ver = LNET_PROC_VER_GET(*ppos);
@@ -328,9 +328,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 		lnet_net_unlock(0);
 		*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
 	} else {
-		struct list_head		*r;
-		struct lnet_peer	*peer = NULL;
-		int			skip = off - 1;
+		struct list_head *r;
+		struct lnet_peer *peer = NULL;
+		int skip = off - 1;
 
 		lnet_net_lock(0);
 
@@ -360,14 +360,14 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 			lnet_nid_t nid = peer->lp_nid;
 			unsigned long now = cfs_time_current();
 			unsigned long deadline = peer->lp_ping_deadline;
-			int nrefs     = peer->lp_refcount;
-			int nrtrrefs  = peer->lp_rtr_refcount;
+			int nrefs = peer->lp_refcount;
+			int nrtrrefs = peer->lp_rtr_refcount;
 			int alive_cnt = peer->lp_alive_count;
-			int alive     = peer->lp_alive;
-			int pingsent  = !peer->lp_ping_notsent;
+			int alive = peer->lp_alive;
+			int pingsent = !peer->lp_ping_notsent;
 			int last_ping = cfs_duration_sec(cfs_time_sub(now,
 						     peer->lp_ping_timestamp));
-			int down_ni   = 0;
+			int down_ni = 0;
 			lnet_route_t *rtr;
 
 			if ((peer->lp_ping_feats &
@@ -428,16 +428,16 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 static int proc_lnet_peers(struct ctl_table *table, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	const int		tmpsiz  = 256;
-	struct lnet_peer_table	*ptable;
-	char			*tmpstr;
-	char			*s;
-	int			cpt  = LNET_PROC_CPT_GET(*ppos);
-	int			ver  = LNET_PROC_VER_GET(*ppos);
-	int			hash = LNET_PROC_HASH_GET(*ppos);
-	int			hoff = LNET_PROC_HOFF_GET(*ppos);
-	int			rc = 0;
-	int			len;
+	const int tmpsiz  = 256;
+	struct lnet_peer_table *ptable;
+	char *tmpstr;
+	char *s;
+	int cpt  = LNET_PROC_CPT_GET(*ppos);
+	int ver  = LNET_PROC_VER_GET(*ppos);
+	int hash = LNET_PROC_HASH_GET(*ppos);
+	int hoff = LNET_PROC_HOFF_GET(*ppos);
+	int rc = 0;
+	int len;
 
 	CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
 	LASSERT(!write);
@@ -465,9 +465,9 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
 
 		hoff++;
 	} else {
-		struct lnet_peer	*peer;
-		struct list_head		*p;
-		int			skip;
+		struct lnet_peer *peer;
+		struct list_head *p;
+		int skip;
  again:
 		p = NULL;
 		peer = NULL;
@@ -521,23 +521,23 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
 		}
 
 		if (peer != NULL) {
-			lnet_nid_t nid       = peer->lp_nid;
-			int	nrefs     = peer->lp_refcount;
-			int	lastalive = -1;
-			char      *aliveness = "NA";
-			int	maxcr     = peer->lp_ni->ni_peertxcredits;
-			int	txcr      = peer->lp_txcredits;
-			int	mintxcr   = peer->lp_mintxcredits;
-			int	rtrcr     = peer->lp_rtrcredits;
-			int	minrtrcr  = peer->lp_minrtrcredits;
-			int	txqnob    = peer->lp_txqnob;
+			lnet_nid_t nid = peer->lp_nid;
+			int nrefs = peer->lp_refcount;
+			int lastalive = -1;
+			char *aliveness = "NA";
+			int maxcr = peer->lp_ni->ni_peertxcredits;
+			int txcr = peer->lp_txcredits;
+			int mintxcr = peer->lp_mintxcredits;
+			int rtrcr = peer->lp_rtrcredits;
+			int minrtrcr = peer->lp_minrtrcredits;
+			int txqnob = peer->lp_txqnob;
 
 			if (lnet_isrouter(peer) ||
 			    lnet_peer_aliveness_enabled(peer))
 				aliveness = peer->lp_alive ? "up" : "down";
 
 			if (lnet_peer_aliveness_enabled(peer)) {
-				unsigned long     now = cfs_time_current();
+				unsigned long now = cfs_time_current();
 				long delta;
 
 				delta = cfs_time_sub(now, peer->lp_last_alive);
@@ -595,13 +595,13 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
 static int __proc_lnet_buffers(void *data, int write,
 			       loff_t pos, void __user *buffer, int nob)
 {
-	char	    *s;
-	char	    *tmpstr;
-	int		tmpsiz;
-	int		idx;
-	int		len;
-	int		rc;
-	int		i;
+	char *s;
+	char *tmpstr;
+	int tmpsiz;
+	int idx;
+	int len;
+	int rc;
+	int i;
 
 	LASSERT(!write);
 
@@ -660,11 +660,11 @@ static int proc_lnet_buffers(struct ctl_table *table, int write,
 static int proc_lnet_nis(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int	tmpsiz = 128 * LNET_CPT_NUMBER;
-	int	rc = 0;
-	char      *tmpstr;
-	char      *s;
-	int	len;
+	int tmpsiz = 128 * LNET_CPT_NUMBER;
+	int rc = 0;
+	char *tmpstr;
+	char *s;
+	int len;
 
 	LASSERT(!write);
 
@@ -684,9 +684,9 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
 			      "rtr", "max", "tx", "min");
 		LASSERT(tmpstr + tmpsiz - s > 0);
 	} else {
-		struct list_head	*n;
-		lnet_ni_t	 *ni   = NULL;
-		int		skip = *ppos - 1;
+		struct list_head *n;
+		lnet_ni_t *ni   = NULL;
+		int skip = *ppos - 1;
 
 		lnet_net_lock(0);
 
@@ -705,12 +705,12 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
 		}
 
 		if (ni != NULL) {
-			struct lnet_tx_queue	*tq;
-			char	*stat;
-			long	now = get_seconds();
-			int	last_alive = -1;
-			int	i;
-			int	j;
+			struct lnet_tx_queue *tq;
+			char *stat;
+			long now = get_seconds();
+			int last_alive = -1;
+			int i;
+			int j;
 
 			if (the_lnet.ln_routing)
 				last_alive = now - ni->ni_last_alive;
@@ -777,9 +777,9 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
 }
 
 struct lnet_portal_rotors {
-	int	     pr_value;
-	const char      *pr_name;
-	const char	*pr_desc;
+	int pr_value;
+	const char *pr_name;
+	const char *pr_desc;
 };
 
 static struct lnet_portal_rotors	portal_rotors[] = {
@@ -815,11 +815,11 @@ extern int portal_rotor;
 static int __proc_lnet_portal_rotor(void *data, int write,
 				    loff_t pos, void __user *buffer, int nob)
 {
-	const int	buf_len	= 128;
-	char		*buf;
-	char		*tmp;
-	int		rc;
-	int		i;
+	const int buf_len = 128;
+	char *buf;
+	char *tmp;
+	int rc;
+	int i;
 
 	LIBCFS_ALLOC(buf, buf_len);
 	if (buf == NULL)
@@ -887,38 +887,38 @@ static struct ctl_table lnet_table[] = {
 	 * to go via /proc for portability.
 	 */
 	{
-		.procname = "stats",
-		.mode     = 0644,
+		.procname     = "stats",
+		.mode         = 0644,
 		.proc_handler = &proc_lnet_stats,
 	},
 	{
-		.procname = "routes",
-		.mode     = 0444,
+		.procname     = "routes",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_routes,
 	},
 	{
-		.procname = "routers",
-		.mode     = 0444,
+		.procname     = "routers",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_routers,
 	},
 	{
-		.procname = "peers",
-		.mode     = 0444,
+		.procname     = "peers",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_peers,
 	},
 	{
-		.procname = "buffers",
-		.mode     = 0444,
+		.procname     = "buffers",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_buffers,
 	},
 	{
-		.procname = "nis",
-		.mode     = 0444,
+		.procname     = "nis",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_nis,
 	},
 	{
-		.procname = "portal_rotor",
-		.mode     = 0644,
+		.procname     = "portal_rotor",
+		.mode         = 0644,
 		.proc_handler = &proc_lnet_portal_rotor,
 	},
 	{
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync Mike Shuey
                   ` (20 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

lnet_fini_locks() does nothing.  Remove.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/api-ni.c |    8 --------
 1 files changed, 0 insertions(+), 8 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 6910f56..1adc481 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -98,12 +98,6 @@ lnet_init_locks(void)
 	mutex_init(&the_lnet.ln_api_mutex);
 }
 
-static void
-lnet_fini_locks(void)
-{
-}
-
-
 static int
 lnet_create_remote_nets_table(void)
 {
@@ -153,8 +147,6 @@ lnet_destroy_locks(void)
 		cfs_percpt_lock_free(the_lnet.ln_net_lock);
 		the_lnet.ln_net_lock = NULL;
 	}
-
-	lnet_fini_locks();
 }
 
 static int
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals Mike Shuey
                   ` (19 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

LNetSetAsync() returns 0, and is never called.  Doesn't exist in the Intel
tree, either.  Remove it.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/include/linux/lnet/api.h |    1 -
 drivers/staging/lustre/lnet/lnet/lib-move.c     |   24 -----------------------
 2 files changed, 0 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index cd86517..c4dc1b2 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -209,7 +209,6 @@ int LNetGet(lnet_nid_t	self,
 int LNetSetLazyPortal(int portal);
 int LNetClearLazyPortal(int portal);
 int LNetCtl(unsigned int cmd, void *arg);
-int LNetSetAsync(lnet_process_id_t id, int nasync);
 
 /** @} lnet_misc */
 
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index ba59fe7..433faae 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -2434,27 +2434,3 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
 	return -EHOSTUNREACH;
 }
 EXPORT_SYMBOL(LNetDist);
-
-/**
- * Set the number of asynchronous messages expected from a target process.
- *
- * This function is only meaningful for userspace callers. It's a no-op when
- * called from kernel.
- *
- * Asynchronous messages are those that can come from a target when the
- * userspace process is not waiting for IO to complete; e.g., AST callbacks
- * from Lustre servers. Specifying the expected number of such messages
- * allows them to be eagerly received when user process is not running in
- * LNet; otherwise network errors may occur.
- *
- * \param id Process ID of the target process.
- * \param nasync Number of asynchronous messages expected from the target.
- *
- * \return 0 on success, and an error code otherwise.
- */
-int
-LNetSetAsync(lnet_process_id_t id, int nasync)
-{
-	return 0;
-}
-EXPORT_SYMBOL(LNetSetAsync);
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (2 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace Mike Shuey
                   ` (18 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

Fix the module version to match upstream development.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/module.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index ff3f831..f73d644 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -147,7 +147,7 @@ fini_lnet(void)
 }
 
 MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
-MODULE_DESCRIPTION("Portals v3.1");
+MODULE_DESCRIPTION("LNet v3.1");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0.0");
 
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (3 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-22 13:03   ` Dan Carpenter
  2015-05-21 19:50 ` [PATCH v4 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro Mike Shuey
                   ` (17 subsequent siblings)
  22 siblings, 1 reply; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

Unify variable declarations to use a single whitespace.  Also line up
declarations and comments in o2iblnd.h.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c    |  458 +++++++-------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h    |  680 ++++++++++----------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c |  484 +++++++-------
 .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c  |   48 +-
 4 files changed, 841 insertions(+), 829 deletions(-)

diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 3bad441..a57c5c3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -42,21 +42,21 @@
 #include <asm/div64.h>
 
 static lnd_t the_o2iblnd = {
-	.lnd_type       = O2IBLND,
-	.lnd_startup    = kiblnd_startup,
-	.lnd_shutdown   = kiblnd_shutdown,
-	.lnd_ctl	= kiblnd_ctl,
-	.lnd_query      = kiblnd_query,
-	.lnd_send       = kiblnd_send,
-	.lnd_recv       = kiblnd_recv,
+	.lnd_type     = O2IBLND,
+	.lnd_startup  = kiblnd_startup,
+	.lnd_shutdown = kiblnd_shutdown,
+	.lnd_ctl      = kiblnd_ctl,
+	.lnd_query    = kiblnd_query,
+	.lnd_send     = kiblnd_send,
+	.lnd_recv     = kiblnd_recv,
 };
 
-kib_data_t	      kiblnd_data;
+kib_data_t kiblnd_data;
 
 static __u32 kiblnd_cksum(void *ptr, int nob)
 {
-	char  *c  = ptr;
-	__u32  sum = 0;
+	char *c = ptr;
+	__u32 sum = 0;
 
 	while (nob-- > 0)
 		sum = ((sum << 1) | (sum >> 31)) + *c++;
@@ -138,10 +138,10 @@ static int kiblnd_msgtype2size(int type)
 
 static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
 {
-	kib_rdma_desc_t   *rd;
-	int		nob;
-	int		n;
-	int		i;
+	kib_rdma_desc_t *rd;
+	int nob;
+	int n;
+	int i;
 
 	LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
 		 msg->ibm_type == IBLND_MSG_PUT_ACK);
@@ -210,10 +210,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
 int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
 {
 	const int hdr_size = offsetof(kib_msg_t, ibm_u);
-	__u32     msg_cksum;
-	__u16     version;
-	int       msg_nob;
-	int       flip;
+	__u32 msg_cksum;
+	__u16 version;
+	int msg_nob;
+	int flip;
 
 	/* 6 bytes are enough to have received magic + version */
 	if (nob < 6) {
@@ -320,10 +320,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
 
 int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
 {
-	kib_peer_t	*peer;
-	kib_net_t	*net = ni->ni_data;
-	int		cpt = lnet_cpt_of_nid(nid);
-	unsigned long   flags;
+	kib_peer_t *peer;
+	kib_net_t *net = ni->ni_data;
+	int cpt = lnet_cpt_of_nid(nid);
+	unsigned long flags;
 
 	LASSERT(net != NULL);
 	LASSERT(nid != LNET_NID_ANY);
@@ -385,9 +385,9 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
 {
 	/* the caller is responsible for accounting the additional reference
 	 * that this creates */
-	struct list_head       *peer_list = kiblnd_nid2peerlist(nid);
-	struct list_head       *tmp;
-	kib_peer_t       *peer;
+	struct list_head *peer_list = kiblnd_nid2peerlist(nid);
+	struct list_head *tmp;
+	kib_peer_t *peer;
 
 	list_for_each(tmp, peer_list) {
 
@@ -422,10 +422,10 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer)
 static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
 				lnet_nid_t *nidp, int *count)
 {
-	kib_peer_t	    *peer;
-	struct list_head	    *ptmp;
-	int		    i;
-	unsigned long	  flags;
+	kib_peer_t *peer;
+	struct list_head *ptmp;
+	int i;
+	unsigned long flags;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -459,9 +459,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
 
 static void kiblnd_del_peer_locked(kib_peer_t *peer)
 {
-	struct list_head	   *ctmp;
-	struct list_head	   *cnxt;
-	kib_conn_t	   *conn;
+	struct list_head *ctmp;
+	struct list_head *cnxt;
+	kib_conn_t *conn;
 
 	if (list_empty(&peer->ibp_conns)) {
 		kiblnd_unlink_peer_locked(peer);
@@ -480,14 +480,14 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer)
 static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
 {
 	LIST_HEAD(zombies);
-	struct list_head	    *ptmp;
-	struct list_head	    *pnxt;
-	kib_peer_t	    *peer;
-	int		    lo;
-	int		    hi;
-	int		    i;
-	unsigned long	  flags;
-	int		    rc = -ENOENT;
+	struct list_head *ptmp;
+	struct list_head *pnxt;
+	kib_peer_t *peer;
+	int lo;
+	int hi;
+	int i;
+	unsigned long flags;
+	int rc = -ENOENT;
 
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -532,12 +532,12 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
 
 static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
 {
-	kib_peer_t	    *peer;
-	struct list_head	    *ptmp;
-	kib_conn_t	    *conn;
-	struct list_head	    *ctmp;
-	int		    i;
-	unsigned long	  flags;
+	kib_peer_t *peer;
+	struct list_head *ptmp;
+	kib_conn_t *conn;
+	struct list_head *ctmp;
+	int i;
+	unsigned long flags;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -593,7 +593,7 @@ int kiblnd_translate_mtu(int value)
 
 static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
 {
-	int	   mtu;
+	int mtu;
 
 	/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
 	if (cmid->route.path_rec == NULL)
@@ -607,11 +607,11 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
 
 static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
 {
-	cpumask_t	*mask;
-	int		vectors;
-	int		off;
-	int		i;
-	lnet_nid_t	nid = conn->ibc_peer->ibp_nid;
+	cpumask_t *mask;
+	int vectors;
+	int off;
+	int i;
+	lnet_nid_t nid = conn->ibc_peer->ibp_nid;
 
 	vectors = conn->ibc_cmid->device->num_comp_vectors;
 	if (vectors <= 1)
@@ -642,17 +642,17 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 	 * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
 	 * to destroy 'cmid' here since I'm called from the CM which still has
 	 * its ref on 'cmid'). */
-	rwlock_t		*glock = &kiblnd_data.kib_global_lock;
-	kib_net_t	      *net = peer->ibp_ni->ni_data;
-	kib_dev_t	      *dev;
+	rwlock_t *glock = &kiblnd_data.kib_global_lock;
+	kib_net_t *net = peer->ibp_ni->ni_data;
+	kib_dev_t *dev;
 	struct ib_qp_init_attr *init_qp_attr;
-	struct kib_sched_info	*sched;
-	kib_conn_t		*conn;
-	struct ib_cq		*cq;
-	unsigned long		flags;
-	int			cpt;
-	int			rc;
-	int			i;
+	struct kib_sched_info *sched;
+	kib_conn_t *conn;
+	struct ib_cq *cq;
+	unsigned long flags;
+	int cpt;
+	int rc;
+	int i;
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
@@ -837,8 +837,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 void kiblnd_destroy_conn(kib_conn_t *conn)
 {
 	struct rdma_cm_id *cmid = conn->ibc_cmid;
-	kib_peer_t	*peer = conn->ibc_peer;
-	int		rc;
+	kib_peer_t *peer = conn->ibc_peer;
+	int rc;
 
 	LASSERT(!in_interrupt());
 	LASSERT(atomic_read(&conn->ibc_refcount) == 0);
@@ -904,10 +904,10 @@ void kiblnd_destroy_conn(kib_conn_t *conn)
 
 int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
 {
-	kib_conn_t	     *conn;
-	struct list_head	     *ctmp;
-	struct list_head	     *cnxt;
-	int		     count = 0;
+	kib_conn_t *conn;
+	struct list_head *ctmp;
+	struct list_head *cnxt;
+	int count = 0;
 
 	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
 		conn = list_entry(ctmp, kib_conn_t, ibc_list);
@@ -926,10 +926,10 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
 int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
 				     int version, __u64 incarnation)
 {
-	kib_conn_t	     *conn;
-	struct list_head	     *ctmp;
-	struct list_head	     *cnxt;
-	int		     count = 0;
+	kib_conn_t *conn;
+	struct list_head *ctmp;
+	struct list_head *cnxt;
+	int count = 0;
 
 	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
 		conn = list_entry(ctmp, kib_conn_t, ibc_list);
@@ -953,14 +953,14 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
 
 static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
 {
-	kib_peer_t	     *peer;
-	struct list_head	     *ptmp;
-	struct list_head	     *pnxt;
-	int		     lo;
-	int		     hi;
-	int		     i;
-	unsigned long	   flags;
-	int		     count = 0;
+	kib_peer_t *peer;
+	struct list_head *ptmp;
+	struct list_head *pnxt;
+	int lo;
+	int hi;
+	int i;
+	unsigned long flags;
+	int count = 0;
 
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -1001,17 +1001,17 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
 int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 {
 	struct libcfs_ioctl_data *data = arg;
-	int		       rc = -EINVAL;
+	int rc = -EINVAL;
 
 	switch (cmd) {
 	case IOC_LIBCFS_GET_PEER: {
-		lnet_nid_t   nid = 0;
-		int	  count = 0;
+		lnet_nid_t nid = 0;
+		int count = 0;
 
 		rc = kiblnd_get_peer_info(ni, data->ioc_count,
 					  &nid, &count);
-		data->ioc_nid    = nid;
-		data->ioc_count  = count;
+		data->ioc_nid   = nid;
+		data->ioc_count = count;
 		break;
 	}
 
@@ -1053,11 +1053,11 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 
 void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 {
-	unsigned long	last_alive = 0;
-	unsigned long	now = cfs_time_current();
-	rwlock_t	*glock = &kiblnd_data.kib_global_lock;
-	kib_peer_t	*peer;
-	unsigned long	flags;
+	unsigned long last_alive = 0;
+	unsigned long now = cfs_time_current();
+	rwlock_t *glock = &kiblnd_data.kib_global_lock;
+	kib_peer_t *peer;
+	unsigned long flags;
 
 	read_lock_irqsave(glock, flags);
 
@@ -1086,8 +1086,8 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 
 void kiblnd_free_pages(kib_pages_t *p)
 {
-	int	npages = p->ibp_npages;
-	int	i;
+	int npages = p->ibp_npages;
+	int i;
 
 	for (i = 0; i < npages; i++) {
 		if (p->ibp_pages[i] != NULL)
@@ -1099,8 +1099,8 @@ void kiblnd_free_pages(kib_pages_t *p)
 
 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
 {
-	kib_pages_t	*p;
-	int		i;
+	kib_pages_t *p;
+	int i;
 
 	LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
 			 offsetof(kib_pages_t, ibp_pages[npages]));
@@ -1130,7 +1130,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
 void kiblnd_unmap_rx_descs(kib_conn_t *conn)
 {
 	kib_rx_t *rx;
-	int       i;
+	int i;
 
 	LASSERT(conn->ibc_rxs != NULL);
 	LASSERT(conn->ibc_hdev != NULL);
@@ -1153,14 +1153,13 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn)
 
 void kiblnd_map_rx_descs(kib_conn_t *conn)
 {
-	kib_rx_t       *rx;
-	struct page    *pg;
-	int	     pg_off;
-	int	     ipg;
-	int	     i;
+	kib_rx_t *rx;
+	struct page *pg;
+	int pg_off;
+	int ipg;
+	int i;
 
-	for (pg_off = ipg = i = 0;
-	     i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+	for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
 		pg = conn->ibc_rx_pages->ibp_pages[ipg];
 		rx = &conn->ibc_rxs[i];
 
@@ -1192,9 +1191,9 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
 
 static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
 {
-	kib_hca_dev_t  *hdev = tpo->tpo_hdev;
-	kib_tx_t       *tx;
-	int	     i;
+	kib_hca_dev_t *hdev = tpo->tpo_hdev;
+	kib_tx_t *tx;
+	int i;
 
 	LASSERT(tpo->tpo_pool.po_allocated == 0);
 
@@ -1216,8 +1215,8 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
 static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
 {
 	kib_hca_dev_t *hdev;
-	unsigned long  flags;
-	int	    i = 0;
+	unsigned long flags;
+	int i = 0;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 	while (dev->ibd_failover) {
@@ -1240,15 +1239,15 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
 
 static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
 {
-	kib_pages_t    *txpgs = tpo->tpo_tx_pages;
-	kib_pool_t     *pool  = &tpo->tpo_pool;
-	kib_net_t      *net   = pool->po_owner->ps_net;
-	kib_dev_t      *dev;
-	struct page    *page;
-	kib_tx_t       *tx;
-	int	     page_offset;
-	int	     ipage;
-	int	     i;
+	kib_pages_t *txpgs = tpo->tpo_tx_pages;
+	kib_pool_t *pool = &tpo->tpo_pool;
+	kib_net_t *net = pool->po_owner->ps_net;
+	kib_dev_t *dev;
+	struct page *page;
+	kib_tx_t *tx;
+	int page_offset;
+	int ipage;
+	int i;
 
 	LASSERT(net != NULL);
 
@@ -1291,7 +1290,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
 
 struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
 {
-	__u64   index;
+	__u64 index;
 
 	LASSERT(hdev->ibh_mrs[0] != NULL);
 
@@ -1311,7 +1310,7 @@ struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
 {
 	struct ib_mr *prev_mr;
 	struct ib_mr *mr;
-	int	   i;
+	int i;
 
 	LASSERT(hdev->ibh_mrs[0] != NULL);
 
@@ -1382,18 +1381,18 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
 				  kib_fmr_pool_t **pp_fpo)
 {
 	/* FMR pool for RDMA */
-	kib_dev_t	       *dev = fps->fps_net->ibn_dev;
-	kib_fmr_pool_t	  *fpo;
+	kib_dev_t *dev = fps->fps_net->ibn_dev;
+	kib_fmr_pool_t *fpo;
 	struct ib_fmr_pool_param param = {
 		.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
-		.page_shift	= PAGE_SHIFT,
-		.access	    = (IB_ACCESS_LOCAL_WRITE |
-				      IB_ACCESS_REMOTE_WRITE),
-		.pool_size	   = fps->fps_pool_size,
+		.page_shift        = PAGE_SHIFT,
+		.access            = (IB_ACCESS_LOCAL_WRITE |
+		                      IB_ACCESS_REMOTE_WRITE),
+		.pool_size         = fps->fps_pool_size,
 		.dirty_watermark   = fps->fps_flush_trigger,
 		.flush_function    = NULL,
-		.flush_arg	 = NULL,
-		.cache	     = !!*kiblnd_tunables.kib_fmr_cache};
+		.flush_arg         = NULL,
+		.cache             = !!*kiblnd_tunables.kib_fmr_cache};
 	int rc;
 
 	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1454,7 +1453,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
 				   int flush_trigger)
 {
 	kib_fmr_pool_t *fpo;
-	int	     rc;
+	int rc;
 
 	memset(fps, 0, sizeof(kib_fmr_poolset_t));
 
@@ -1485,11 +1484,11 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
 {
 	LIST_HEAD(zombies);
-	kib_fmr_pool_t    *fpo = fmr->fmr_pool;
+	kib_fmr_pool_t *fpo = fmr->fmr_pool;
 	kib_fmr_poolset_t *fps = fpo->fpo_owner;
-	unsigned long	 now = cfs_time_current();
-	kib_fmr_pool_t    *tmp;
-	int		rc;
+	unsigned long now = cfs_time_current();
+	kib_fmr_pool_t *tmp;
+	int rc;
 
 	rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
 	LASSERT(rc == 0);
@@ -1525,9 +1524,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
 			__u64 iov, kib_fmr_t *fmr)
 {
 	struct ib_pool_fmr *pfmr;
-	kib_fmr_pool_t     *fpo;
-	__u64	       version;
-	int		 rc;
+	kib_fmr_pool_t *fpo;
+	__u64 version;
+	int rc;
 
  again:
 	spin_lock(&fps->fps_lock);
@@ -1658,13 +1657,13 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
 			       kib_ps_node_init_t nd_init,
 			       kib_ps_node_fini_t nd_fini)
 {
-	kib_pool_t	*pool;
-	int		rc;
+	kib_pool_t *pool;
+	int rc;
 
 	memset(ps, 0, sizeof(kib_poolset_t));
 
-	ps->ps_cpt	    = cpt;
-	ps->ps_net	  = net;
+	ps->ps_cpt          = cpt;
+	ps->ps_net          = net;
 	ps->ps_pool_create  = po_create;
 	ps->ps_pool_destroy = po_destroy;
 	ps->ps_node_init    = nd_init;
@@ -1698,9 +1697,9 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
 {
 	LIST_HEAD(zombies);
-	kib_poolset_t  *ps = pool->po_owner;
-	kib_pool_t     *tmp;
-	unsigned long      now = cfs_time_current();
+	kib_poolset_t *ps = pool->po_owner;
+	kib_pool_t *tmp;
+	unsigned long now = cfs_time_current();
 
 	spin_lock(&ps->ps_lock);
 
@@ -1727,9 +1726,9 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
 
 struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
 {
-	struct list_head	    *node;
-	kib_pool_t	    *pool;
-	int		    rc;
+	struct list_head *node;
+	kib_pool_t *pool;
+	int rc;
 
  again:
 	spin_lock(&ps->ps_lock);
@@ -1789,8 +1788,8 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
 
 void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
 {
-	kib_pmr_pool_t      *ppo = pmr->pmr_pool;
-	struct ib_mr	*mr  = pmr->pmr_mr;
+	kib_pmr_pool_t *ppo = pmr->pmr_pool;
+	struct ib_mr *mr = pmr->pmr_mr;
 
 	pmr->pmr_mr = NULL;
 	kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
@@ -1802,9 +1801,9 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
 		    kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
 {
 	kib_phys_mr_t *pmr;
-	struct list_head    *node;
-	int	    rc;
-	int	    i;
+	struct list_head *node;
+	int rc;
+	int i;
 
 	node = kiblnd_pool_alloc_node(&pps->pps_poolset);
 	if (node == NULL) {
@@ -1846,7 +1845,7 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
 static void kiblnd_destroy_pmr_pool(kib_pool_t *pool)
 {
 	kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
-	kib_phys_mr_t  *pmr;
+	kib_phys_mr_t *pmr;
 	kib_phys_mr_t *tmp;
 
 	LASSERT(pool->po_allocated == 0);
@@ -1881,10 +1880,10 @@ static inline int kiblnd_pmr_pool_size(int ncpts)
 static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
 				  kib_pool_t **pp_po)
 {
-	struct kib_pmr_pool	*ppo;
-	struct kib_pool		*pool;
-	kib_phys_mr_t		*pmr;
-	int			i;
+	struct kib_pmr_pool *ppo;
+	struct kib_pool *pool;
+	kib_phys_mr_t *pmr;
+	int i;
 
 	LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
 			 ps->ps_cpt, sizeof(kib_pmr_pool_t));
@@ -1923,8 +1922,8 @@ static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
 
 static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
 {
-	kib_tx_pool_t  *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
-	int	     i;
+	kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
+	int i;
 
 	LASSERT(pool->po_allocated == 0);
 
@@ -1979,9 +1978,9 @@ static int kiblnd_tx_pool_size(int ncpts)
 static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
 				 kib_pool_t **pp_po)
 {
-	int	    i;
-	int	    npg;
-	kib_pool_t    *pool;
+	int i;
+	int npg;
+	kib_pool_t *pool;
 	kib_tx_pool_t *tpo;
 
 	LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
@@ -2064,19 +2063,19 @@ static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
 {
 	kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
 					     tps_poolset);
-	kib_tx_t	 *tx  = list_entry(node, kib_tx_t, tx_list);
+	kib_tx_t *tx  = list_entry(node, kib_tx_t, tx_list);
 
 	tx->tx_cookie = tps->tps_next_tx_cookie++;
 }
 
 static void kiblnd_net_fini_pools(kib_net_t *net)
 {
-	int	i;
+	int i;
 
 	cfs_cpt_for_each(i, lnet_cpt_table()) {
-		kib_tx_poolset_t	*tps;
-		kib_fmr_poolset_t	*fps;
-		kib_pmr_poolset_t	*pps;
+		kib_tx_poolset_t *tps;
+		kib_fmr_poolset_t *fps;
+		kib_pmr_poolset_t *pps;
 
 		if (net->ibn_tx_ps != NULL) {
 			tps = net->ibn_tx_ps[i];
@@ -2112,16 +2111,15 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
 
 static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
 {
-	unsigned long	flags;
-	int		cpt;
-	int		rc;
-	int		i;
+	unsigned long flags;
+	int cpt;
+	int rc;
+	int i;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 	if (*kiblnd_tunables.kib_map_on_demand == 0 &&
 	    net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
-		read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-					   flags);
+		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 		goto create_tx_pool;
 	}
 
@@ -2241,7 +2239,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
 static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
 {
 	struct ib_device_attr *attr;
-	int		    rc;
+	int rc;
 
 	/* It's safe to assume a HCA can handle a page size
 	 * matching that of the native system */
@@ -2284,7 +2282,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
 
 static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
 {
-	int     i;
+	int i;
 
 	if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
 		return;
@@ -2317,12 +2315,11 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
 static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 {
 	struct ib_mr *mr;
-	int	   i;
-	int	   rc;
-	__u64	 mm_size;
-	__u64	 mr_size;
-	int	   acflags = IB_ACCESS_LOCAL_WRITE |
-				IB_ACCESS_REMOTE_WRITE;
+	int i;
+	int rc;
+	__u64 mm_size;
+	__u64 mr_size;
+	int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 
 	rc = kiblnd_hdev_get_attr(hdev);
 	if (rc != 0)
@@ -2371,11 +2368,11 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 	for (i = 0; i < hdev->ibh_nmrs; i++) {
 		struct ib_phys_buf ipb;
-		__u64	      iova;
+		__u64 iova;
 
 		ipb.size = hdev->ibh_mr_size;
 		ipb.addr = i * mr_size;
-		iova     = ipb.addr;
+		iova = ipb.addr;
 
 		mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
 		if (IS_ERR(mr)) {
@@ -2406,10 +2403,10 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
 
 static int kiblnd_dev_need_failover(kib_dev_t *dev)
 {
-	struct rdma_cm_id  *cmid;
-	struct sockaddr_in  srcaddr;
-	struct sockaddr_in  dstaddr;
-	int		 rc;
+	struct rdma_cm_id *cmid;
+	struct sockaddr_in srcaddr;
+	struct sockaddr_in dstaddr;
+	int rc;
 
 	if (dev->ibd_hdev == NULL || /* initializing */
 	    dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
@@ -2435,7 +2432,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
 	}
 
 	memset(&srcaddr, 0, sizeof(srcaddr));
-	srcaddr.sin_family      = AF_INET;
+	srcaddr.sin_family = AF_INET;
 	srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
 
 	memset(&dstaddr, 0, sizeof(dstaddr));
@@ -2464,15 +2461,15 @@ int kiblnd_dev_failover(kib_dev_t *dev)
 	LIST_HEAD(zombie_tpo);
 	LIST_HEAD(zombie_ppo);
 	LIST_HEAD(zombie_fpo);
-	struct rdma_cm_id  *cmid  = NULL;
-	kib_hca_dev_t      *hdev  = NULL;
-	kib_hca_dev_t      *old;
-	struct ib_pd       *pd;
-	kib_net_t	  *net;
-	struct sockaddr_in  addr;
-	unsigned long       flags;
-	int		 rc = 0;
-	int		    i;
+	struct rdma_cm_id *cmid  = NULL;
+	kib_hca_dev_t *hdev  = NULL;
+	kib_hca_dev_t *old;
+	struct ib_pd *pd;
+	kib_net_t *net;
+	struct sockaddr_in addr;
+	unsigned long flags;
+	int rc = 0;
+	int i;
 
 	LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
 		 dev->ibd_can_failover ||
@@ -2614,11 +2611,11 @@ void kiblnd_destroy_dev(kib_dev_t *dev)
 static kib_dev_t *kiblnd_create_dev(char *ifname)
 {
 	struct net_device *netdev;
-	kib_dev_t	 *dev;
-	__u32	      netmask;
-	__u32	      ip;
-	int		up;
-	int		rc;
+	kib_dev_t *dev;
+	__u32 netmask;
+	__u32 ip;
+	int up;
+	int rc;
 
 	rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
 	if (rc != 0) {
@@ -2665,8 +2662,8 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
 
 static void kiblnd_base_shutdown(void)
 {
-	struct kib_sched_info	*sched;
-	int			i;
+	struct kib_sched_info *sched;
+	int i;
 
 	LASSERT(list_empty(&kiblnd_data.kib_devs));
 
@@ -2732,10 +2729,10 @@ static void kiblnd_base_shutdown(void)
 
 void kiblnd_shutdown(lnet_ni_t *ni)
 {
-	kib_net_t	*net = ni->ni_data;
-	rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
-	int	       i;
-	unsigned long     flags;
+	kib_net_t *net = ni->ni_data;
+	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+	int i;
+	unsigned long flags;
 
 	LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
 
@@ -2804,9 +2801,9 @@ out:
 
 static int kiblnd_base_startup(void)
 {
-	struct kib_sched_info	*sched;
-	int			rc;
-	int			i;
+	struct kib_sched_info *sched;
+	int rc;
+	int i;
 
 	LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
 
@@ -2821,8 +2818,7 @@ static int kiblnd_base_startup(void)
 
 	kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
 	LIBCFS_ALLOC(kiblnd_data.kib_peers,
-		     sizeof(struct list_head) *
-			    kiblnd_data.kib_peer_hash_size);
+		     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
 	if (kiblnd_data.kib_peers == NULL)
 		goto failed;
 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
@@ -2840,7 +2836,7 @@ static int kiblnd_base_startup(void)
 		goto failed;
 
 	cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
-		int	nthrs;
+		int nthrs;
 
 		spin_lock_init(&sched->ibs_lock);
 		INIT_LIST_HEAD(&sched->ibs_conns);
@@ -2893,9 +2889,9 @@ static int kiblnd_base_startup(void)
 
 static int kiblnd_start_schedulers(struct kib_sched_info *sched)
 {
-	int	rc = 0;
-	int	nthrs;
-	int	i;
+	int rc = 0;
+	int nthrs;
+	int i;
 
 	if (sched->ibs_nthreads == 0) {
 		if (*kiblnd_tunables.kib_nscheds > 0) {
@@ -2913,8 +2909,8 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
 	}
 
 	for (i = 0; i < nthrs; i++) {
-		long	id;
-		char	name[20];
+		long id;
+		char name[20];
 
 		id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
 		snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
@@ -2935,9 +2931,9 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
 static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
 				    int ncpts)
 {
-	int	cpt;
-	int	rc;
-	int	i;
+	int cpt;
+	int rc;
+	int i;
 
 	for (i = 0; i < ncpts; i++) {
 		struct kib_sched_info *sched;
@@ -2960,10 +2956,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
 
 static kib_dev_t *kiblnd_dev_search(char *ifname)
 {
-	kib_dev_t	*alias = NULL;
-	kib_dev_t	*dev;
-	char		*colon;
-	char		*colon2;
+	kib_dev_t *alias = NULL;
+	kib_dev_t *dev;
+	char *colon;
+	char *colon2;
 
 	colon = strchr(ifname, ':');
 	list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
@@ -2992,13 +2988,13 @@ static kib_dev_t *kiblnd_dev_search(char *ifname)
 
 int kiblnd_startup(lnet_ni_t *ni)
 {
-	char		     *ifname;
-	kib_dev_t		*ibdev = NULL;
-	kib_net_t		*net;
-	struct timeval	    tv;
-	unsigned long	     flags;
-	int		       rc;
-	int			  newdev;
+	char *ifname;
+	kib_dev_t *ibdev = NULL;
+	kib_net_t *net;
+	struct timeval tv;
+	unsigned long flags;
+	int rc;
+	int newdev;
 
 	LASSERT(ni->ni_lnd == &the_o2iblnd);
 
@@ -3091,7 +3087,7 @@ static void __exit kiblnd_module_fini(void)
 
 static int __init kiblnd_module_init(void)
 {
-	int    rc;
+	int rc;
 
 	CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
 	CLASSERT(offsetof(kib_msg_t,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index cd664d0..7f52c69 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -80,42 +80,47 @@
 #define IBLND_N_SCHED_HIGH		4
 
 typedef struct {
-	int	      *kib_dev_failover;     /* HCA failover */
-	unsigned int     *kib_service;	  /* IB service number */
-	int	      *kib_min_reconnect_interval; /* first failed connection retry... */
-	int	      *kib_max_reconnect_interval; /* ...exponentially increasing to this */
-	int	      *kib_cksum;	    /* checksum kib_msg_t? */
-	int	      *kib_timeout;	  /* comms timeout (seconds) */
-	int	      *kib_keepalive;	/* keepalive timeout (seconds) */
-	int	      *kib_ntx;	      /* # tx descs */
-	int	      *kib_credits;	  /* # concurrent sends */
-	int	      *kib_peertxcredits;    /* # concurrent sends to 1 peer */
-	int	      *kib_peerrtrcredits;   /* # per-peer router buffer credits */
-	int	      *kib_peercredits_hiw;  /* # when eagerly to return credits */
-	int	      *kib_peertimeout;      /* seconds to consider peer dead */
-	char	    **kib_default_ipif;     /* default IPoIB interface */
-	int	      *kib_retry_count;
-	int	      *kib_rnr_retry_count;
-	int	      *kib_concurrent_sends; /* send work queue sizing */
-	int		 *kib_ib_mtu;		/* IB MTU */
-	int	      *kib_map_on_demand;    /* map-on-demand if RD has more fragments
-						 * than this value, 0 disable map-on-demand */
-	int	      *kib_pmr_pool_size;    /* # physical MR in pool */
-	int	      *kib_fmr_pool_size;    /* # FMRs in pool */
-	int	      *kib_fmr_flush_trigger; /* When to trigger FMR flush */
-	int	      *kib_fmr_cache;	/* enable FMR pool cache? */
-	int	      *kib_require_priv_port;/* accept only privileged ports */
-	int	      *kib_use_priv_port;    /* use privileged port for active connect */
-	/* # threads on each CPT */
-	int		 *kib_nscheds;
+	int          *kib_dev_failover;      /* HCA failover */
+	unsigned int *kib_service;           /* IB service number */
+	int          *kib_min_reconnect_interval; /* first failed connection
+						   * retry... */
+	int          *kib_max_reconnect_interval; /* ...exponentially increasing
+						   * to this */
+	int          *kib_cksum;             /* checksum kib_msg_t? */
+	int          *kib_timeout;           /* comms timeout (seconds) */
+	int          *kib_keepalive;         /* keepalive timeout (seconds) */
+	int          *kib_ntx;               /* # tx descs */
+	int          *kib_credits;           /* # concurrent sends */
+	int          *kib_peertxcredits;     /* # concurrent sends to 1 peer */
+	int          *kib_peerrtrcredits;    /* # per-peer router buffer
+					      * credits */
+	int          *kib_peercredits_hiw;   /* # when eagerly to return
+					      * credits */
+	int          *kib_peertimeout;       /* seconds to consider peer dead */
+	char         **kib_default_ipif;     /* default IPoIB interface */
+	int          *kib_retry_count;
+	int          *kib_rnr_retry_count;
+	int          *kib_concurrent_sends;  /* send work queue sizing */
+	int          *kib_ib_mtu;            /* IB MTU */
+	int          *kib_map_on_demand;     /* map-on-demand if RD has more
+					      * fragments than this value, 0
+					      * disable map-on-demand */
+	int          *kib_pmr_pool_size;     /* # physical MR in pool */
+	int          *kib_fmr_pool_size;     /* # FMRs in pool */
+	int          *kib_fmr_flush_trigger; /* When to trigger FMR flush */
+	int          *kib_fmr_cache;         /* enable FMR pool cache? */
+	int          *kib_require_priv_port; /* accept only privileged ports */
+	int          *kib_use_priv_port;     /* use privileged port for active
+					      * connect */
+	int          *kib_nscheds;           /* # threads on each CPT */
 } kib_tunables_t;
 
 extern kib_tunables_t  kiblnd_tunables;
 
-#define IBLND_MSG_QUEUE_SIZE_V1      8	  /* V1 only : # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER_V1    7	  /* V1 only : when eagerly to return credits */
+#define IBLND_MSG_QUEUE_SIZE_V1   8 /* V1 only : # messages/RDMAs in-flight */
+#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
 
-#define IBLND_CREDITS_DEFAULT	8	  /* default # of peer credits */
+#define IBLND_CREDITS_DEFAULT     8 /* default # of peer credits */
 #define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
 
 #define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
@@ -186,34 +191,36 @@ struct kib_hca_dev;
 #endif
 
 typedef struct {
-	struct list_head	   ibd_list;	  /* chain on kib_devs */
-	struct list_head	   ibd_fail_list;     /* chain on kib_failed_devs */
-	__u32		ibd_ifip;	  /* IPoIB interface IP */
-	/** IPoIB interface name */
-	char		 ibd_ifname[KIB_IFNAME_SIZE];
-	int		  ibd_nnets;	 /* # nets extant */
-
-	unsigned long	   ibd_next_failover;
-	int		  ibd_failed_failover; /* # failover failures */
-	unsigned int	 ibd_failover;      /* failover in progress */
-	unsigned int	 ibd_can_failover;  /* IPoIB interface is a bonding master */
-	struct list_head	   ibd_nets;
-	struct kib_hca_dev  *ibd_hdev;
+	struct list_head   ibd_list;            /* chain on kib_devs */
+	struct list_head   ibd_fail_list;       /* chain on kib_failed_devs */
+	__u32              ibd_ifip;            /* IPoIB interface IP */
+
+	/* IPoIB interface name */
+	char               ibd_ifname[KIB_IFNAME_SIZE];
+	int                ibd_nnets;           /* # nets extant */
+
+	unsigned long      ibd_next_failover;
+	int                ibd_failed_failover; /* # failover failures */
+	unsigned int       ibd_failover;        /* failover in progress */
+	unsigned int       ibd_can_failover;    /* IPoIB interface is a bonding
+						 * master */
+	struct list_head   ibd_nets;
+	struct kib_hca_dev *ibd_hdev;
 } kib_dev_t;
 
 typedef struct kib_hca_dev {
-	struct rdma_cm_id   *ibh_cmid;	  /* listener cmid */
-	struct ib_device    *ibh_ibdev;	 /* IB device */
-	int		  ibh_page_shift;    /* page shift of current HCA */
-	int		  ibh_page_size;     /* page size of current HCA */
-	__u64		ibh_page_mask;     /* page mask of current HCA */
-	int		  ibh_mr_shift;      /* bits shift of max MR size */
-	__u64		ibh_mr_size;       /* size of MR */
-	int		  ibh_nmrs;	  /* # of global MRs */
-	struct ib_mr       **ibh_mrs;	   /* global MR */
-	struct ib_pd	*ibh_pd;	    /* PD */
-	kib_dev_t	   *ibh_dev;	   /* owner */
-	atomic_t	 ibh_ref;	   /* refcount */
+	struct rdma_cm_id  *ibh_cmid;           /* listener cmid */
+	struct ib_device   *ibh_ibdev;          /* IB device */
+	int                ibh_page_shift;      /* page shift of current HCA */
+	int                ibh_page_size;       /* page size of current HCA */
+	__u64              ibh_page_mask;       /* page mask of current HCA */
+	int                ibh_mr_shift;        /* bits shift of max MR size */
+	__u64              ibh_mr_size;         /* size of MR */
+	int                ibh_nmrs;            /* # of global MRs */
+	struct ib_mr       **ibh_mrs;           /* global MR */
+	struct ib_pd       *ibh_pd;             /* PD */
+	kib_dev_t          *ibh_dev;            /* owner */
+	atomic_t           ibh_ref;             /* refcount */
 } kib_hca_dev_t;
 
 /** # of seconds to keep pool alive */
@@ -222,19 +229,19 @@ typedef struct kib_hca_dev {
 #define IBLND_POOL_RETRY	1
 
 typedef struct {
-	int		     ibp_npages;	     /* # pages */
-	struct page	    *ibp_pages[0];	   /* page array */
+	int                ibp_npages;          /* # pages */
+	struct page        *ibp_pages[0];       /* page array */
 } kib_pages_t;
 
 struct kib_pmr_pool;
 
 typedef struct {
-	struct list_head	      pmr_list;	       /* chain node */
-	struct ib_phys_buf     *pmr_ipb;		/* physical buffer */
-	struct ib_mr	   *pmr_mr;		 /* IB MR */
-	struct kib_pmr_pool    *pmr_pool;	       /* owner of this MR */
-	__u64		   pmr_iova;	       /* Virtual I/O address */
-	int		     pmr_refcount;	   /* reference count */
+	struct list_head    pmr_list;           /* chain node */
+	struct ib_phys_buf  *pmr_ipb;           /* physical buffer */
+	struct ib_mr        *pmr_mr;            /* IB MR */
+	struct kib_pmr_pool *pmr_pool;          /* owner of this MR */
+	__u64               pmr_iova;           /* Virtual I/O address */
+	int                 pmr_refcount;       /* reference count */
 } kib_phys_mr_t;
 
 struct kib_pool;
@@ -251,97 +258,99 @@ struct kib_net;
 #define IBLND_POOL_NAME_LEN     32
 
 typedef struct kib_poolset {
-	spinlock_t		ps_lock;		/* serialize */
-	struct kib_net	 *ps_net;		 /* network it belongs to */
-	char		    ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
-	struct list_head	      ps_pool_list;	   /* list of pools */
-	struct list_head	      ps_failed_pool_list;    /* failed pool list */
-	unsigned long	      ps_next_retry;	  /* time stamp for retry if failed to allocate */
-	int		     ps_increasing;	  /* is allocating new pool */
-	int		     ps_pool_size;	   /* new pool size */
-	int			ps_cpt;			/* CPT id */
-
-	kib_ps_pool_create_t    ps_pool_create;	 /* create a new pool */
-	kib_ps_pool_destroy_t   ps_pool_destroy;	/* destroy a pool */
-	kib_ps_node_init_t      ps_node_init;	   /* initialize new allocated node */
-	kib_ps_node_fini_t      ps_node_fini;	   /* finalize node */
+	spinlock_t            ps_lock;            /* serialize */
+	struct kib_net        *ps_net;            /* network it belongs to */
+	char                  ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
+	struct list_head      ps_pool_list;       /* list of pools */
+	struct list_head      ps_failed_pool_list;/* failed pool list */
+	unsigned long         ps_next_retry;      /* time stamp for retry if
+						   * failed to allocate */
+	int                   ps_increasing;      /* is allocating new pool */
+	int                   ps_pool_size;       /* new pool size */
+	int                   ps_cpt;             /* CPT id */
+
+	kib_ps_pool_create_t  ps_pool_create;     /* create a new pool */
+	kib_ps_pool_destroy_t ps_pool_destroy;    /* destroy a pool */
+	kib_ps_node_init_t    ps_node_init;       /* initialize new allocated
+						   * node */
+	kib_ps_node_fini_t    ps_node_fini;       /* finalize node */
 } kib_poolset_t;
 
 typedef struct kib_pool {
-	struct list_head	      po_list;		/* chain on pool list */
-	struct list_head	      po_free_list;	   /* pre-allocated node */
-	kib_poolset_t	  *po_owner;	       /* pool_set of this pool */
-	unsigned long	      po_deadline;	    /* deadline of this pool */
-	int		     po_allocated;	   /* # of elements in use */
-	int		     po_failed;	      /* pool is created on failed HCA */
-	int		     po_size;		/* # of pre-allocated elements */
+	struct list_head      po_list;         /* chain on pool list */
+	struct list_head      po_free_list;    /* pre-allocated node */
+	kib_poolset_t         *po_owner;       /* pool_set of this pool */
+	unsigned long         po_deadline;     /* deadline of this pool */
+	int                   po_allocated;    /* # of elements in use */
+	int                   po_failed;       /* pool is created on failed
+						* HCA */
+	int                   po_size;         /* # of pre-allocated elements */
 } kib_pool_t;
 
 typedef struct {
-	kib_poolset_t	   tps_poolset;	    /* pool-set */
-	__u64		   tps_next_tx_cookie;     /* cookie of TX */
+	kib_poolset_t         tps_poolset;        /* pool-set */
+	__u64                 tps_next_tx_cookie; /* cookie of TX */
 } kib_tx_poolset_t;
 
 typedef struct {
-	kib_pool_t	      tpo_pool;	       /* pool */
-	struct kib_hca_dev     *tpo_hdev;	       /* device for this pool */
-	struct kib_tx	  *tpo_tx_descs;	   /* all the tx descriptors */
-	kib_pages_t	    *tpo_tx_pages;	   /* premapped tx msg pages */
+	kib_pool_t            tpo_pool;           /* pool */
+	struct kib_hca_dev    *tpo_hdev;          /* device for this pool */
+	struct kib_tx         *tpo_tx_descs;      /* all the tx descriptors */
+	kib_pages_t           *tpo_tx_pages;      /* premapped tx msg pages */
 } kib_tx_pool_t;
 
 typedef struct {
-	kib_poolset_t	   pps_poolset;	    /* pool-set */
+	kib_poolset_t         pps_poolset;        /* pool-set */
 } kib_pmr_poolset_t;
 
 typedef struct kib_pmr_pool {
-	struct kib_hca_dev     *ppo_hdev;	       /* device for this pool */
-	kib_pool_t	      ppo_pool;	       /* pool */
+	struct kib_hca_dev    *ppo_hdev;          /* device for this pool */
+	kib_pool_t            ppo_pool;           /* pool */
 } kib_pmr_pool_t;
 
 typedef struct {
-	spinlock_t		fps_lock;		/* serialize */
-	struct kib_net	 *fps_net;		/* IB network */
-	struct list_head	      fps_pool_list;	  /* FMR pool list */
-	struct list_head	      fps_failed_pool_list;   /* FMR pool list */
-	__u64		   fps_version;	    /* validity stamp */
-	int			fps_cpt;		/* CPT id */
-	int			fps_pool_size;
-	int			fps_flush_trigger;
-	/* is allocating new pool */
-	int			fps_increasing;
-	/* time stamp for retry if failed to allocate */
-	unsigned long		fps_next_retry;
+	spinlock_t            fps_lock;            /* serialize */
+	struct kib_net        *fps_net;            /* IB network */
+	struct list_head      fps_pool_list;       /* FMR pool list */
+	struct list_head      fps_failed_pool_list;/* FMR pool list */
+	__u64                 fps_version;         /* validity stamp */
+	int                   fps_cpt;             /* CPT id */
+	int                   fps_pool_size;
+	int                   fps_flush_trigger;
+	int                   fps_increasing;      /* is allocating new pool */
+	unsigned long         fps_next_retry;      /* time stamp for retry if
+						    * failed to allocate */
 } kib_fmr_poolset_t;
 
 typedef struct {
-	struct list_head	      fpo_list;	       /* chain on pool list */
-	struct kib_hca_dev     *fpo_hdev;	       /* device for this pool */
-	kib_fmr_poolset_t      *fpo_owner;	      /* owner of this pool */
-	struct ib_fmr_pool     *fpo_fmr_pool;	   /* IB FMR pool */
-	unsigned long	      fpo_deadline;	   /* deadline of this pool */
-	int		     fpo_failed;	     /* fmr pool is failed */
-	int		     fpo_map_count;	  /* # of mapped FMR */
+	struct list_head      fpo_list;            /* chain on pool list */
+	struct kib_hca_dev    *fpo_hdev;           /* device for this pool */
+	kib_fmr_poolset_t     *fpo_owner;          /* owner of this pool */
+	struct ib_fmr_pool    *fpo_fmr_pool;       /* IB FMR pool */
+	unsigned long         fpo_deadline;        /* deadline of this pool */
+	int                   fpo_failed;          /* fmr pool is failed */
+	int                   fpo_map_count;       /* # of mapped FMR */
 } kib_fmr_pool_t;
 
 typedef struct {
-	struct ib_pool_fmr     *fmr_pfmr;	       /* IB pool fmr */
-	kib_fmr_pool_t	 *fmr_pool;	       /* pool of FMR */
+	struct ib_pool_fmr    *fmr_pfmr;           /* IB pool fmr */
+	kib_fmr_pool_t        *fmr_pool;           /* pool of FMR */
 } kib_fmr_t;
 
 typedef struct kib_net {
-	struct list_head	   ibn_list;	  /* chain on kib_dev_t::ibd_nets */
-	__u64		ibn_incarnation;   /* my epoch */
-	int		  ibn_init;	  /* initialisation state */
-	int		  ibn_shutdown;      /* shutting down? */
+	struct list_head      ibn_list;       /* chain on kib_dev_t::ibd_nets */
+	__u64                 ibn_incarnation;/* my epoch */
+	int                   ibn_init;       /* initialisation state */
+	int                   ibn_shutdown;   /* shutting down? */
 
-	atomic_t		ibn_npeers;	/* # peers extant */
-	atomic_t		ibn_nconns;	/* # connections extant */
+	atomic_t              ibn_npeers;     /* # peers extant */
+	atomic_t              ibn_nconns;     /* # connections extant */
 
-	kib_tx_poolset_t	**ibn_tx_ps;	/* tx pool-set */
-	kib_fmr_poolset_t	**ibn_fmr_ps;	/* fmr pool-set */
-	kib_pmr_poolset_t	**ibn_pmr_ps;	/* pmr pool-set */
+	kib_tx_poolset_t      **ibn_tx_ps;    /* tx pool-set */
+	kib_fmr_poolset_t     **ibn_fmr_ps;   /* fmr pool-set */
+	kib_pmr_poolset_t     **ibn_pmr_ps;   /* pmr pool-set */
 
-	kib_dev_t		*ibn_dev;	/* underlying IB device */
+	kib_dev_t             *ibn_dev;       /* underlying IB device */
 } kib_net_t;
 
 #define KIB_THREAD_SHIFT		16
@@ -350,51 +359,45 @@ typedef struct kib_net {
 #define KIB_THREAD_TID(id)		((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
 
 struct kib_sched_info {
-	/* serialise */
-	spinlock_t		ibs_lock;
-	/* schedulers sleep here */
-	wait_queue_head_t		ibs_waitq;
-	/* conns to check for rx completions */
-	struct list_head		ibs_conns;
-	/* number of scheduler threads */
-	int			ibs_nthreads;
-	/* max allowed scheduler threads */
-	int			ibs_nthreads_max;
-	int			ibs_cpt;	/* CPT id */
+	spinlock_t         ibs_lock;     /* serialise */
+	wait_queue_head_t  ibs_waitq;    /* schedulers sleep here */
+	struct list_head   ibs_conns;    /* conns to check for rx completions */
+	int                ibs_nthreads; /* number of scheduler threads */
+	int                ibs_nthreads_max; /* max allowed scheduler threads */
+	int                ibs_cpt;      /* CPT id */
 };
 
 typedef struct {
-	int			kib_init;	/* initialisation state */
-	int			kib_shutdown;	/* shut down? */
-	struct list_head		kib_devs;	/* IB devices extant */
-	/* list head of failed devices */
-	struct list_head		kib_failed_devs;
-	/* schedulers sleep here */
-	wait_queue_head_t		kib_failover_waitq;
-	atomic_t		kib_nthreads;	/* # live threads */
-	/* stabilize net/dev/peer/conn ops */
-	rwlock_t		kib_global_lock;
-	/* hash table of all my known peers */
-	struct list_head		*kib_peers;
-	/* size of kib_peers */
-	int			kib_peer_hash_size;
-	/* the connd task (serialisation assertions) */
-	void			*kib_connd;
-	/* connections to setup/teardown */
-	struct list_head		kib_connd_conns;
-	/* connections with zero refcount */
-	struct list_head		kib_connd_zombies;
-	/* connection daemon sleeps here */
-	wait_queue_head_t		kib_connd_waitq;
-	spinlock_t		kib_connd_lock;	/* serialise */
-	struct ib_qp_attr	kib_error_qpa;	/* QP->ERROR */
-	/* percpt data for schedulers */
-	struct kib_sched_info	**kib_scheds;
+	int                   kib_init;           /* initialisation state */
+	int                   kib_shutdown;       /* shut down? */
+	struct list_head      kib_devs;           /* IB devices extant */
+	struct list_head      kib_failed_devs;    /* list head of failed
+						   * devices */
+	wait_queue_head_t     kib_failover_waitq; /* schedulers sleep here */
+	atomic_t              kib_nthreads;       /* # live threads */
+	rwlock_t              kib_global_lock;    /* stabilize net/dev/peer/conn
+						   * ops */
+	struct list_head      *kib_peers;         /* hash table of all my known
+						   * peers */
+	int                   kib_peer_hash_size; /* size of kib_peers */
+	void                  *kib_connd;         /* the connd task
+						   * (serialisation assertions)
+						   */
+	struct list_head      kib_connd_conns;    /* connections to
+						   * setup/teardown */
+	struct list_head      kib_connd_zombies;  /* connections with zero
+						   * refcount */
+	wait_queue_head_t     kib_connd_waitq;    /* connection daemon sleeps
+						   * here */
+	spinlock_t            kib_connd_lock;     /* serialise */
+	struct ib_qp_attr     kib_error_qpa;      /* QP->ERROR */
+	struct kib_sched_info **kib_scheds;       /* percpt data for schedulers
+						   */
 } kib_data_t;
 
-#define IBLND_INIT_NOTHING	 0
-#define IBLND_INIT_DATA	    1
-#define IBLND_INIT_ALL	     2
+#define IBLND_INIT_NOTHING 0
+#define IBLND_INIT_DATA    1
+#define IBLND_INIT_ALL     2
 
 /************************************************************************
  * IB Wire message format.
@@ -402,228 +405,243 @@ typedef struct {
  */
 
 typedef struct kib_connparams {
-	__u16	     ibcp_queue_depth;
-	__u16	     ibcp_max_frags;
-	__u32	     ibcp_max_msg_size;
+	__u16        ibcp_queue_depth;
+	__u16        ibcp_max_frags;
+	__u32        ibcp_max_msg_size;
 } WIRE_ATTR kib_connparams_t;
 
 typedef struct {
-	lnet_hdr_t	ibim_hdr;	     /* portals header */
-	char	      ibim_payload[0];      /* piggy-backed payload */
+	lnet_hdr_t   ibim_hdr;        /* portals header */
+	char         ibim_payload[0]; /* piggy-backed payload */
 } WIRE_ATTR kib_immediate_msg_t;
 
 typedef struct {
-	__u32	     rf_nob;	       /* # bytes this frag */
-	__u64	     rf_addr;	      /* CAVEAT EMPTOR: misaligned!! */
+	__u32        rf_nob;          /* # bytes this frag */
+	__u64        rf_addr;         /* CAVEAT EMPTOR: misaligned!! */
 } WIRE_ATTR kib_rdma_frag_t;
 
 typedef struct {
-	__u32	     rd_key;	       /* local/remote key */
-	__u32	     rd_nfrags;	    /* # fragments */
-	kib_rdma_frag_t   rd_frags[0];	  /* buffer frags */
+	__u32           rd_key;       /* local/remote key */
+	__u32           rd_nfrags;    /* # fragments */
+	kib_rdma_frag_t rd_frags[0];  /* buffer frags */
 } WIRE_ATTR kib_rdma_desc_t;
 
 typedef struct {
-	lnet_hdr_t	ibprm_hdr;	    /* portals header */
-	__u64	     ibprm_cookie;	 /* opaque completion cookie */
+	lnet_hdr_t      ibprm_hdr;    /* portals header */
+	__u64           ibprm_cookie; /* opaque completion cookie */
 } WIRE_ATTR kib_putreq_msg_t;
 
 typedef struct {
-	__u64	     ibpam_src_cookie;     /* reflected completion cookie */
-	__u64	     ibpam_dst_cookie;     /* opaque completion cookie */
-	kib_rdma_desc_t   ibpam_rd;	     /* sender's sink buffer */
+	__u64           ibpam_src_cookie; /* reflected completion cookie */
+	__u64           ibpam_dst_cookie; /* opaque completion cookie */
+	kib_rdma_desc_t ibpam_rd;         /* sender's sink buffer */
 } WIRE_ATTR kib_putack_msg_t;
 
 typedef struct {
-	lnet_hdr_t	ibgm_hdr;	     /* portals header */
-	__u64	     ibgm_cookie;	  /* opaque completion cookie */
-	kib_rdma_desc_t   ibgm_rd;	      /* rdma descriptor */
+	lnet_hdr_t      ibgm_hdr;     /* portals header */
+	__u64           ibgm_cookie;  /* opaque completion cookie */
+	kib_rdma_desc_t ibgm_rd;      /* rdma descriptor */
 } WIRE_ATTR kib_get_msg_t;
 
 typedef struct {
-	__u64	     ibcm_cookie;	  /* opaque completion cookie */
-	__s32	     ibcm_status;	  /* < 0 failure: >= 0 length */
+	__u64           ibcm_cookie;  /* opaque completion cookie */
+	__s32           ibcm_status;  /* < 0 failure: >= 0 length */
 } WIRE_ATTR kib_completion_msg_t;
 
 typedef struct {
 	/* First 2 fields fixed FOR ALL TIME */
-	__u32	     ibm_magic;	    /* I'm an ibnal message */
-	__u16	     ibm_version;	  /* this is my version number */
-
-	__u8	      ibm_type;	     /* msg type */
-	__u8	      ibm_credits;	  /* returned credits */
-	__u32	     ibm_nob;	      /* # bytes in whole message */
-	__u32	     ibm_cksum;	    /* checksum (0 = no checksum) */
-	__u64	     ibm_srcnid;	   /* sender's NID */
-	__u64	     ibm_srcstamp;	 /* sender's incarnation */
-	__u64	     ibm_dstnid;	   /* destination's NID */
-	__u64	     ibm_dststamp;	 /* destination's incarnation */
+	__u32           ibm_magic;    /* I'm an ibnal message */
+	__u16           ibm_version;  /* this is my version number */
+
+	__u8            ibm_type;     /* msg type */
+	__u8            ibm_credits;  /* returned credits */
+	__u32           ibm_nob;      /* # bytes in whole message */
+	__u32           ibm_cksum;    /* checksum (0 = no checksum) */
+	__u64           ibm_srcnid;   /* sender's NID */
+	__u64           ibm_srcstamp; /* sender's incarnation */
+	__u64           ibm_dstnid;   /* destination's NID */
+	__u64           ibm_dststamp; /* destination's incarnation */
 
 	union {
-		kib_connparams_t      connparams;
-		kib_immediate_msg_t   immediate;
-		kib_putreq_msg_t      putreq;
-		kib_putack_msg_t      putack;
-		kib_get_msg_t	 get;
-		kib_completion_msg_t  completion;
+		kib_connparams_t     connparams;
+		kib_immediate_msg_t  immediate;
+		kib_putreq_msg_t     putreq;
+		kib_putack_msg_t     putack;
+		kib_get_msg_t        get;
+		kib_completion_msg_t completion;
 	} WIRE_ATTR ibm_u;
 } WIRE_ATTR kib_msg_t;
 
-#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC	/* unique magic */
+#define IBLND_MSG_MAGIC     LNET_PROTO_IB_MAGIC /* unique magic */
 
-#define IBLND_MSG_VERSION_1	 0x11
-#define IBLND_MSG_VERSION_2	 0x12
-#define IBLND_MSG_VERSION	   IBLND_MSG_VERSION_2
+#define IBLND_MSG_VERSION_1 0x11
+#define IBLND_MSG_VERSION_2 0x12
+#define IBLND_MSG_VERSION   IBLND_MSG_VERSION_2
 
-#define IBLND_MSG_CONNREQ	   0xc0	/* connection request */
-#define IBLND_MSG_CONNACK	   0xc1	/* connection acknowledge */
-#define IBLND_MSG_NOOP	      0xd0	/* nothing (just credits) */
-#define IBLND_MSG_IMMEDIATE	 0xd1	/* immediate */
-#define IBLND_MSG_PUT_REQ	   0xd2	/* putreq (src->sink) */
-#define IBLND_MSG_PUT_NAK	   0xd3	/* completion (sink->src) */
-#define IBLND_MSG_PUT_ACK	   0xd4	/* putack (sink->src) */
-#define IBLND_MSG_PUT_DONE	  0xd5	/* completion (src->sink) */
-#define IBLND_MSG_GET_REQ	   0xd6	/* getreq (sink->src) */
-#define IBLND_MSG_GET_DONE	  0xd7	/* completion (src->sink: all OK) */
+#define IBLND_MSG_CONNREQ   0xc0	/* connection request */
+#define IBLND_MSG_CONNACK   0xc1	/* connection acknowledge */
+#define IBLND_MSG_NOOP      0xd0	/* nothing (just credits) */
+#define IBLND_MSG_IMMEDIATE 0xd1	/* immediate */
+#define IBLND_MSG_PUT_REQ   0xd2	/* putreq (src->sink) */
+#define IBLND_MSG_PUT_NAK   0xd3	/* completion (sink->src) */
+#define IBLND_MSG_PUT_ACK   0xd4	/* putack (sink->src) */
+#define IBLND_MSG_PUT_DONE  0xd5	/* completion (src->sink) */
+#define IBLND_MSG_GET_REQ   0xd6	/* getreq (sink->src) */
+#define IBLND_MSG_GET_DONE  0xd7	/* completion (src->sink: all OK) */
 
 typedef struct {
-	__u32	    ibr_magic;	     /* sender's magic */
-	__u16	    ibr_version;	   /* sender's version */
-	__u8	     ibr_why;	       /* reject reason */
-	__u8	     ibr_padding;	   /* padding */
-	__u64	    ibr_incarnation;       /* incarnation of peer */
-	kib_connparams_t ibr_cp;		/* connection parameters */
+	__u32            ibr_magic;       /* sender's magic */
+	__u16            ibr_version;     /* sender's version */
+	__u8             ibr_why;         /* reject reason */
+	__u8             ibr_padding;     /* padding */
+	__u64            ibr_incarnation; /* incarnation of peer */
+	kib_connparams_t ibr_cp;          /* connection parameters */
 } WIRE_ATTR kib_rej_t;
 
 /* connection rejection reasons */
-#define IBLND_REJECT_CONN_RACE       1	  /* You lost connection race */
-#define IBLND_REJECT_NO_RESOURCES    2	  /* Out of memory/conns etc */
-#define IBLND_REJECT_FATAL	   3	  /* Anything else */
-
-#define IBLND_REJECT_CONN_UNCOMPAT   4	  /* incompatible version peer */
-#define IBLND_REJECT_CONN_STALE      5	  /* stale peer */
-
-#define IBLND_REJECT_RDMA_FRAGS      6	  /* Fatal: peer's rdma frags can't match mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE  7	  /* Fatal: peer's msg queue size can't match mine */
+#define IBLND_REJECT_CONN_RACE      1 /* You lost connection race */
+#define IBLND_REJECT_NO_RESOURCES   2 /* Out of memory/conns etc */
+#define IBLND_REJECT_FATAL          3 /* Anything else */
+#define IBLND_REJECT_CONN_UNCOMPAT  4 /* incompatible version peer */
+#define IBLND_REJECT_CONN_STALE     5 /* stale peer */
+#define IBLND_REJECT_RDMA_FRAGS     6 /* Fatal: peer's rdma frags can't match
+				       * mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't
+				       * match mine */
 
 /***********************************************************************/
 
-typedef struct kib_rx			   /* receive message */
+typedef struct kib_rx                         /* receive message */
 {
-	struct list_head		rx_list;      /* queue for attention */
-	struct kib_conn	  *rx_conn;      /* owning conn */
-	int		       rx_nob;       /* # bytes received (-1 while posted) */
-	enum ib_wc_status	 rx_status;    /* completion status */
-	kib_msg_t		*rx_msg;       /* message buffer (host vaddr) */
-	__u64		     rx_msgaddr;   /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR   (rx_msgunmap); /* for dma_unmap_single() */
-	struct ib_recv_wr	 rx_wrq;       /* receive work item... */
-	struct ib_sge	     rx_sge;       /* ...and its memory */
+	struct list_head       rx_list;       /* queue for attention */
+	struct kib_conn        *rx_conn;      /* owning conn */
+	int                    rx_nob;        /* # bytes received (-1 while
+					       * posted) */
+	enum ib_wc_status      rx_status;     /* completion status */
+	kib_msg_t              *rx_msg;       /* message buffer (host vaddr) */
+	__u64                  rx_msgaddr;    /* message buffer (I/O addr) */
+	DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+	struct ib_recv_wr      rx_wrq;        /* receive work item... */
+	struct ib_sge          rx_sge;        /* ...and its memory */
 } kib_rx_t;
 
-#define IBLND_POSTRX_DONT_POST    0	     /* don't post */
-#define IBLND_POSTRX_NO_CREDIT    1	     /* post: no credits */
-#define IBLND_POSTRX_PEER_CREDIT  2	     /* post: give peer back 1 credit */
-#define IBLND_POSTRX_RSRVD_CREDIT 3	     /* post: give myself back 1 reserved credit */
+#define IBLND_POSTRX_DONT_POST    0   /* don't post */
+#define IBLND_POSTRX_NO_CREDIT    1   /* post: no credits */
+#define IBLND_POSTRX_PEER_CREDIT  2   /* post: give peer back 1 credit */
+#define IBLND_POSTRX_RSRVD_CREDIT 3   /* post: give myself back 1 reserved
+				       * credit */
 
-typedef struct kib_tx			   /* transmit message */
+typedef struct kib_tx                         /* transmit message */
 {
-	struct list_head		tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
-	kib_tx_pool_t	    *tx_pool;      /* pool I'm from */
-	struct kib_conn	  *tx_conn;      /* owning conn */
-	short		     tx_sending;   /* # tx callbacks outstanding */
-	short		     tx_queued;    /* queued for sending */
-	short		     tx_waiting;   /* waiting for peer */
-	int		       tx_status;    /* LNET completion status */
-	unsigned long	     tx_deadline;  /* completion deadline */
-	__u64		     tx_cookie;    /* completion cookie */
-	lnet_msg_t	       *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
-	kib_msg_t		*tx_msg;       /* message buffer (host vaddr) */
-	__u64		     tx_msgaddr;   /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR   (tx_msgunmap); /* for dma_unmap_single() */
-	int		       tx_nwrq;      /* # send work items */
-	struct ib_send_wr	*tx_wrq;       /* send work items... */
-	struct ib_sge	    *tx_sge;       /* ...and their memory */
-	kib_rdma_desc_t	  *tx_rd;	/* rdma descriptor */
-	int		       tx_nfrags;    /* # entries in... */
-	struct scatterlist       *tx_frags;     /* dma_map_sg descriptor */
-	__u64		    *tx_pages;     /* rdma phys page addrs */
+	struct list_head       tx_list;       /* queue on idle_txs ibc_tx_queue
+					       * etc. */
+	kib_tx_pool_t          *tx_pool;      /* pool I'm from */
+	struct kib_conn        *tx_conn;      /* owning conn */
+	short                  tx_sending;    /* # tx callbacks outstanding */
+	short                  tx_queued;     /* queued for sending */
+	short                  tx_waiting;    /* waiting for peer */
+	int                    tx_status;     /* LNET completion status */
+	unsigned long          tx_deadline;   /* completion deadline */
+	__u64                  tx_cookie;     /* completion cookie */
+	lnet_msg_t             *tx_lntmsg[2]; /* lnet msgs to finalize on
+					       * completion */
+	kib_msg_t              *tx_msg;       /* message buffer (host vaddr) */
+	__u64                  tx_msgaddr;    /* message buffer (I/O addr) */
+	DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+	int                    tx_nwrq;       /* # send work items */
+	struct ib_send_wr      *tx_wrq;       /* send work items... */
+	struct ib_sge          *tx_sge;       /* ...and their memory */
+	kib_rdma_desc_t        *tx_rd;        /* rdma descriptor */
+	int                    tx_nfrags;     /* # entries in... */
+	struct scatterlist     *tx_frags;     /* dma_map_sg descriptor */
+	__u64                  *tx_pages;     /* rdma phys page addrs */
 	union {
-		kib_phys_mr_t      *pmr;	/* MR for physical buffer */
-		kib_fmr_t	   fmr;	/* FMR */
-	}			 tx_u;
-	int		       tx_dmadir;    /* dma direction */
+		kib_phys_mr_t  *pmr;          /* MR for physical buffer */
+		kib_fmr_t      fmr;           /* FMR */
+	}                      tx_u;
+	int                    tx_dmadir;     /* dma direction */
 } kib_tx_t;
 
 typedef struct kib_connvars {
-	/* connection-in-progress variables */
-	kib_msg_t		 cv_msg;
+	kib_msg_t cv_msg; /* connection-in-progress variables */
 } kib_connvars_t;
 
 typedef struct kib_conn {
-	struct kib_sched_info *ibc_sched;	/* scheduler information */
-	struct kib_peer     *ibc_peer;	  /* owning peer */
-	kib_hca_dev_t       *ibc_hdev;	  /* HCA bound on */
-	struct list_head	   ibc_list;	  /* stash on peer's conn list */
-	struct list_head	   ibc_sched_list;    /* schedule for attention */
-	__u16		ibc_version;       /* version of connection */
-	__u64		ibc_incarnation;   /* which instance of the peer */
-	atomic_t	 ibc_refcount;      /* # users */
-	int		  ibc_state;	 /* what's happening */
-	int		  ibc_nsends_posted; /* # uncompleted sends */
-	int		  ibc_noops_posted;  /* # uncompleted NOOPs */
-	int		  ibc_credits;       /* # credits I have */
-	int		  ibc_outstanding_credits; /* # credits to return */
-	int		  ibc_reserved_credits;/* # ACK/DONE msg credits */
-	int		  ibc_comms_error;   /* set on comms error */
-	unsigned int	     ibc_nrx:16;	/* receive buffers owned */
-	unsigned int	     ibc_scheduled:1;   /* scheduled for attention */
-	unsigned int	     ibc_ready:1;       /* CQ callback fired */
-	/* time of last send */
-	unsigned long	ibc_last_send;
-	/** link chain for kiblnd_check_conns only */
-	struct list_head	   ibc_connd_list;
-	/** rxs completed before ESTABLISHED */
-	struct list_head	   ibc_early_rxs;
-	/** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
-	struct list_head	   ibc_tx_noops;
-	struct list_head	   ibc_tx_queue;       /* sends that need a credit */
-	struct list_head	   ibc_tx_queue_nocred;/* sends that don't need a credit */
-	struct list_head	   ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
-	struct list_head	   ibc_active_txs;     /* active tx awaiting completion */
-	spinlock_t	     ibc_lock;		 /* serialise */
-	kib_rx_t	    *ibc_rxs;	    /* the rx descs */
-	kib_pages_t	 *ibc_rx_pages;       /* premapped rx msg pages */
-
-	struct rdma_cm_id   *ibc_cmid;	   /* CM id */
-	struct ib_cq	*ibc_cq;	     /* completion queue */
-
-	kib_connvars_t      *ibc_connvars;       /* in-progress connection state */
+	struct kib_sched_info *ibc_sched;           /* scheduler information */
+	struct kib_peer       *ibc_peer;            /* owning peer */
+	kib_hca_dev_t         *ibc_hdev;            /* HCA bound on */
+	struct list_head      ibc_list;             /* stash on peer's conn
+						     * list */
+	struct list_head      ibc_sched_list;       /* schedule for attention */
+	__u16                 ibc_version;          /* version of connection */
+	__u64                 ibc_incarnation;      /* which instance of the
+						     * peer */
+	atomic_t              ibc_refcount;         /* # users */
+	int                   ibc_state;            /* what's happening */
+	int                   ibc_nsends_posted;    /* # uncompleted sends */
+	int                   ibc_noops_posted;     /* # uncompleted NOOPs */
+	int                   ibc_credits;          /* # credits I have */
+	int                   ibc_outstanding_credits; /* # credits to return */
+	int                   ibc_reserved_credits; /* # ACK/DONE msg credits */
+	int                   ibc_comms_error;      /* set on comms error */
+	unsigned int          ibc_nrx:16;           /* receive buffers owned */
+	unsigned int          ibc_scheduled:1;      /* scheduled for attention
+						     */
+	unsigned int          ibc_ready:1;          /* CQ callback fired */
+	unsigned long         ibc_last_send;        /* time of last send */
+	struct list_head      ibc_connd_list;       /* link chain for
+						     * kiblnd_check_conns only
+						     */
+	struct list_head      ibc_early_rxs;        /* rxs completed before
+						     * ESTABLISHED */
+	struct list_head      ibc_tx_noops;         /* IBLND_MSG_NOOPs for
+						     * IBLND_MSG_VERSION_1 */
+	struct list_head      ibc_tx_queue;         /* sends that need a credit
+						     */
+	struct list_head      ibc_tx_queue_nocred;  /* sends that don't need a
+						     * credit */
+	struct list_head      ibc_tx_queue_rsrvd;   /* sends that need to
+						     * reserve an ACK/DONE msg
+						     */
+	struct list_head      ibc_active_txs;       /* active tx awaiting
+						     * completion */
+	spinlock_t            ibc_lock;             /* serialise */
+	kib_rx_t              *ibc_rxs;             /* the rx descs */
+	kib_pages_t           *ibc_rx_pages;        /* premapped rx msg pages */
+
+	struct rdma_cm_id     *ibc_cmid;            /* CM id */
+	struct ib_cq          *ibc_cq;              /* completion queue */
+
+	kib_connvars_t        *ibc_connvars;        /* in-progress connection
+						     * state */
 } kib_conn_t;
 
-#define IBLND_CONN_INIT	       0	 /* being initialised */
-#define IBLND_CONN_ACTIVE_CONNECT     1	 /* active sending req */
-#define IBLND_CONN_PASSIVE_WAIT       2	 /* passive waiting for rtu */
-#define IBLND_CONN_ESTABLISHED	3	 /* connection established */
-#define IBLND_CONN_CLOSING	    4	 /* being closed */
-#define IBLND_CONN_DISCONNECTED       5	 /* disconnected */
+#define IBLND_CONN_INIT           0	 /* being initialised */
+#define IBLND_CONN_ACTIVE_CONNECT 1	 /* active sending req */
+#define IBLND_CONN_PASSIVE_WAIT   2	 /* passive waiting for rtu */
+#define IBLND_CONN_ESTABLISHED    3	 /* connection established */
+#define IBLND_CONN_CLOSING        4	 /* being closed */
+#define IBLND_CONN_DISCONNECTED   5	 /* disconnected */
 
 typedef struct kib_peer {
-	struct list_head	   ibp_list;	   /* stash on global peer list */
-	lnet_nid_t	   ibp_nid;	    /* who's on the other end(s) */
-	lnet_ni_t	   *ibp_ni;	     /* LNet interface */
-	atomic_t	 ibp_refcount;       /* # users */
-	struct list_head	   ibp_conns;	  /* all active connections */
-	struct list_head	   ibp_tx_queue;       /* msgs waiting for a conn */
-	__u16		ibp_version;	/* version of peer */
-	__u64		ibp_incarnation;    /* incarnation of peer */
-	int		  ibp_connecting;     /* current active connection attempts */
-	int		  ibp_accepting;      /* current passive connection attempts */
-	int		  ibp_error;	  /* errno on closing this peer */
-	unsigned long	   ibp_last_alive;     /* when (in jiffies) I was last alive */
+	struct list_head ibp_list;        /* stash on global peer list */
+	lnet_nid_t       ibp_nid;         /* who's on the other end(s) */
+	lnet_ni_t        *ibp_ni;         /* LNet interface */
+	atomic_t         ibp_refcount;    /* # users */
+	struct list_head ibp_conns;       /* all active connections */
+	struct list_head ibp_tx_queue;    /* msgs waiting for a conn */
+	__u16            ibp_version;     /* version of peer */
+	__u64            ibp_incarnation; /* incarnation of peer */
+	int              ibp_connecting;  /* current active connection attempts
+					   */
+	int              ibp_accepting;   /* current passive connection attempts
+					   */
+	int              ibp_error;       /* errno on closing this peer */
+	unsigned long    ibp_last_alive;  /* when (in jiffies) I was last alive
+					   */
 } kib_peer_t;
 
-extern kib_data_t      kiblnd_data;
+extern kib_data_t kiblnd_data;
 
 extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
 
@@ -941,8 +959,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
  * right because OFED1.2 defines it as const, to use it we have to add
  * (void *) cast to overcome "const" */
 
-#define KIBLND_CONN_PARAM(e)	    ((e)->param.conn.private_data)
-#define KIBLND_CONN_PARAM_LEN(e)	((e)->param.conn.private_data_len)
+#define KIBLND_CONN_PARAM(e)     ((e)->param.conn.private_data)
+#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
 
 
 struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index dbf3749..477aa8b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -44,9 +44,9 @@ static void
 kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
 {
 	lnet_msg_t *lntmsg[2];
-	kib_net_t  *net = ni->ni_data;
-	int	 rc;
-	int	 i;
+	kib_net_t *net = ni->ni_data;
+	int rc;
+	int i;
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
@@ -102,10 +102,10 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
 static kib_tx_t *
 kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 {
-	kib_net_t		*net = (kib_net_t *)ni->ni_data;
-	struct list_head		*node;
-	kib_tx_t		*tx;
-	kib_tx_poolset_t	*tps;
+	kib_net_t *net = (kib_net_t *)ni->ni_data;
+	struct list_head *node;
+	kib_tx_t *tx;
+	kib_tx_poolset_t *tps;
 
 	tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
 	node = kiblnd_pool_alloc_node(&tps->tps_poolset);
@@ -130,9 +130,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 static void
 kiblnd_drop_rx(kib_rx_t *rx)
 {
-	kib_conn_t		*conn	= rx->rx_conn;
-	struct kib_sched_info	*sched	= conn->ibc_sched;
-	unsigned long		flags;
+	kib_conn_t *conn = rx->rx_conn;
+	struct kib_sched_info *sched = conn->ibc_sched;
+	unsigned long flags;
 
 	spin_lock_irqsave(&sched->ibs_lock, flags);
 	LASSERT(conn->ibc_nrx > 0);
@@ -145,11 +145,11 @@ kiblnd_drop_rx(kib_rx_t *rx)
 int
 kiblnd_post_rx(kib_rx_t *rx, int credit)
 {
-	kib_conn_t	 *conn = rx->rx_conn;
-	kib_net_t	  *net = conn->ibc_peer->ibp_ni->ni_data;
-	struct ib_recv_wr  *bad_wrq = NULL;
-	struct ib_mr       *mr;
-	int		 rc;
+	kib_conn_t *conn = rx->rx_conn;
+	kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+	struct ib_recv_wr *bad_wrq = NULL;
+	struct ib_mr *mr;
+	int rc;
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
@@ -164,10 +164,10 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
 	rx->rx_sge.addr   = rx->rx_msgaddr;
 	rx->rx_sge.length = IBLND_MSG_SIZE;
 
-	rx->rx_wrq.next = NULL;
+	rx->rx_wrq.next    = NULL;
 	rx->rx_wrq.sg_list = &rx->rx_sge;
 	rx->rx_wrq.num_sge = 1;
-	rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
+	rx->rx_wrq.wr_id   = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
 
 	LASSERT(conn->ibc_state >= IBLND_CONN_INIT);
 	LASSERT(rx->rx_nob >= 0);	      /* not posted */
@@ -212,7 +212,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
 static kib_tx_t *
 kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 {
-	struct list_head   *tmp;
+	struct list_head *tmp;
 
 	list_for_each(tmp, &conn->ibc_active_txs) {
 		kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
@@ -237,9 +237,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 static void
 kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 {
-	kib_tx_t    *tx;
-	lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
-	int	  idle;
+	kib_tx_t *tx;
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	int idle;
 
 	spin_lock(&conn->ibc_lock);
 
@@ -276,8 +276,8 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 static void
 kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
 {
-	lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
-	kib_tx_t    *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
 
 	if (tx == NULL) {
 		CERROR("Can't get tx for completion %x for %s\n",
@@ -295,14 +295,14 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
 static void
 kiblnd_handle_rx(kib_rx_t *rx)
 {
-	kib_msg_t    *msg = rx->rx_msg;
-	kib_conn_t   *conn = rx->rx_conn;
-	lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
-	int	   credits = msg->ibm_credits;
-	kib_tx_t     *tx;
-	int	   rc = 0;
-	int	   rc2;
-	int	   post_credit;
+	kib_msg_t *msg = rx->rx_msg;
+	kib_conn_t *conn = rx->rx_conn;
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	int credits = msg->ibm_credits;
+	kib_tx_t *tx;
+	int rc = 0;
+	int rc2;
+	int post_credit;
 
 	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -456,12 +456,12 @@ kiblnd_handle_rx(kib_rx_t *rx)
 static void
 kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
 {
-	kib_msg_t    *msg = rx->rx_msg;
-	kib_conn_t   *conn = rx->rx_conn;
-	lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
-	kib_net_t    *net = ni->ni_data;
-	int	   rc;
-	int	   err = -EIO;
+	kib_msg_t *msg = rx->rx_msg;
+	kib_conn_t *conn = rx->rx_conn;
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	kib_net_t *net = ni->ni_data;
+	int rc;
+	int err = -EIO;
 
 	LASSERT(net != NULL);
 	LASSERT(rx->rx_nob < 0);	       /* was posted */
@@ -502,8 +502,8 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
 	/* racing with connection establishment/teardown! */
 
 	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-		rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
-		unsigned long  flags;
+		rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+		unsigned long flags;
 
 		write_lock_irqsave(g_lock, flags);
 		/* must check holding global lock to eliminate race */
@@ -550,19 +550,19 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
 static int
 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
-	kib_hca_dev_t		*hdev;
-	__u64			*pages = tx->tx_pages;
-	kib_fmr_poolset_t	*fps;
-	int			npages;
-	int			size;
-	int			cpt;
-	int			rc;
-	int			i;
+	kib_hca_dev_t *hdev;
+	__u64 *pages = tx->tx_pages;
+	kib_fmr_poolset_t *fps;
+	int npages;
+	int size;
+	int cpt;
+	int rc;
+	int i;
 
 	LASSERT(tx->tx_pool != NULL);
 	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
 
-	hdev  = tx->tx_pool->tpo_hdev;
+	hdev = tx->tx_pool->tpo_hdev;
 
 	for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
 		for (size = 0; size <  rd->rd_frags[i].rf_nob;
@@ -586,7 +586,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 	rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
 					 tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
 	rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
-	rd->rd_frags[0].rf_nob   = nob;
+	rd->rd_frags[0].rf_nob = nob;
 	rd->rd_nfrags = 1;
 
 	return 0;
@@ -595,11 +595,11 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 static int
 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
-	kib_hca_dev_t		*hdev;
-	kib_pmr_poolset_t	*pps;
-	__u64			iova;
-	int			cpt;
-	int			rc;
+	kib_hca_dev_t *hdev;
+	kib_pmr_poolset_t *pps;
+	__u64 iova;
+	int cpt;
+	int rc;
 
 	LASSERT(tx->tx_pool != NULL);
 	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
@@ -623,7 +623,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 					 tx->tx_u.pmr->pmr_mr->lkey;
 	rd->rd_nfrags = 1;
 	rd->rd_frags[0].rf_addr = iova;
-	rd->rd_frags[0].rf_nob  = nob;
+	rd->rd_frags[0].rf_nob = nob;
 
 	return 0;
 }
@@ -631,7 +631,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 void
 kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
 {
-	kib_net_t  *net = ni->ni_data;
+	kib_net_t *net = ni->ni_data;
 
 	LASSERT(net != NULL);
 
@@ -655,20 +655,19 @@ int
 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
 	      kib_rdma_desc_t *rd, int nfrags)
 {
-	kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
-	kib_net_t	  *net   = ni->ni_data;
-	struct ib_mr       *mr    = NULL;
-	__u32	       nob;
-	int		 i;
+	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+	kib_net_t *net = ni->ni_data;
+	struct ib_mr *mr = NULL;
+	__u32 nob;
+	int i;
 
 	/* If rd is not tx_rd, it's going to get sent to a peer and I'm the
 	 * RDMA sink */
 	tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	tx->tx_nfrags = nfrags;
 
-	rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev,
-				  tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+	rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
+					  tx->tx_nfrags, tx->tx_dmadir);
 
 	for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
 		rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
@@ -699,12 +698,12 @@ static int
 kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 		    unsigned int niov, struct kvec *iov, int offset, int nob)
 {
-	kib_net_t	  *net = ni->ni_data;
-	struct page	*page;
+	kib_net_t *net = ni->ni_data;
+	struct page *page;
 	struct scatterlist *sg;
-	unsigned long       vaddr;
-	int		 fragnob;
-	int		 page_offset;
+	unsigned long vaddr;
+	int fragnob;
+	int page_offset;
 
 	LASSERT(nob > 0);
 	LASSERT(niov > 0);
@@ -752,9 +751,9 @@ static int
 kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 		      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
 {
-	kib_net_t	  *net = ni->ni_data;
+	kib_net_t *net = ni->ni_data;
 	struct scatterlist *sg;
-	int		 fragnob;
+	int fragnob;
 
 	CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
 
@@ -793,11 +792,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 	__releases(conn->ibc_lock)
 	__acquires(conn->ibc_lock)
 {
-	kib_msg_t	 *msg = tx->tx_msg;
-	kib_peer_t	*peer = conn->ibc_peer;
-	int		ver = conn->ibc_version;
-	int		rc;
-	int		done;
+	kib_msg_t *msg = tx->tx_msg;
+	kib_peer_t *peer = conn->ibc_peer;
+	int ver = conn->ibc_version;
+	int rc;
+	int done;
 	struct ib_send_wr *bad_wrq;
 
 	LASSERT(tx->tx_queued);
@@ -878,8 +877,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 		/* close_conn will launch failover */
 		rc = -ENETDOWN;
 	} else {
-		rc = ib_post_send(conn->ibc_cmid->qp,
-				  tx->tx_wrq, &bad_wrq);
+		rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq);
 	}
 
 	conn->ibc_last_send = jiffies;
@@ -925,9 +923,9 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 void
 kiblnd_check_sends(kib_conn_t *conn)
 {
-	int	ver = conn->ibc_version;
+	int ver = conn->ibc_version;
 	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
-	kib_tx_t  *tx;
+	kib_tx_t *tx;
 
 	/* Don't send anything until after the connection is established */
 	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -997,9 +995,9 @@ kiblnd_check_sends(kib_conn_t *conn)
 static void
 kiblnd_tx_complete(kib_tx_t *tx, int status)
 {
-	int	   failed = (status != IB_WC_SUCCESS);
-	kib_conn_t   *conn = tx->tx_conn;
-	int	   idle;
+	int failed = (status != IB_WC_SUCCESS);
+	kib_conn_t *conn = tx->tx_conn;
+	int idle;
 
 	LASSERT(tx->tx_sending > 0);
 
@@ -1051,11 +1049,11 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
 void
 kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 {
-	kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
-	struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
+	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+	struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
 	struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
-	int		nob = offsetof(kib_msg_t, ibm_u) + body_nob;
-	struct ib_mr      *mr;
+	int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+	struct ib_mr *mr;
 
 	LASSERT(tx->tx_nwrq >= 0);
 	LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
@@ -1086,14 +1084,14 @@ int
 kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
 {
-	kib_msg_t	 *ibmsg = tx->tx_msg;
-	kib_rdma_desc_t   *srcrd = tx->tx_rd;
-	struct ib_sge     *sge = &tx->tx_sge[0];
+	kib_msg_t *ibmsg = tx->tx_msg;
+	kib_rdma_desc_t *srcrd = tx->tx_rd;
+	struct ib_sge *sge = &tx->tx_sge[0];
 	struct ib_send_wr *wrq = &tx->tx_wrq[0];
-	int		rc  = resid;
-	int		srcidx;
-	int		dstidx;
-	int		wrknob;
+	int rc  = resid;
+	int srcidx;
+	int dstidx;
+	int wrknob;
 
 	LASSERT(!in_interrupt());
 	LASSERT(tx->tx_nwrq == 0);
@@ -1144,7 +1142,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		wrq->send_flags = 0;
 
 		wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
-		wrq->wr.rdma.rkey	= kiblnd_rd_frag_key(dstrd, dstidx);
+		wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);
 
 		srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
 		dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
@@ -1170,7 +1168,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 void
 kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
 {
-	struct list_head   *q;
+	struct list_head *q;
 
 	LASSERT(tx->tx_nwrq > 0);	      /* work items set up */
 	LASSERT(!tx->tx_queued);	       /* not queued for sending already */
@@ -1271,11 +1269,11 @@ static void
 kiblnd_connect_peer(kib_peer_t *peer)
 {
 	struct rdma_cm_id *cmid;
-	kib_dev_t	 *dev;
-	kib_net_t	 *net = peer->ibp_ni->ni_data;
+	kib_dev_t *dev;
+	kib_net_t *net = peer->ibp_ni->ni_data;
 	struct sockaddr_in srcaddr;
 	struct sockaddr_in dstaddr;
-	int		rc;
+	int rc;
 
 	LASSERT(net != NULL);
 	LASSERT(peer->ibp_connecting > 0);
@@ -1335,12 +1333,12 @@ kiblnd_connect_peer(kib_peer_t *peer)
 void
 kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 {
-	kib_peer_t	*peer;
-	kib_peer_t	*peer2;
-	kib_conn_t	*conn;
-	rwlock_t	*g_lock = &kiblnd_data.kib_global_lock;
-	unsigned long      flags;
-	int		rc;
+	kib_peer_t *peer;
+	kib_peer_t *peer2;
+	kib_conn_t *conn;
+	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+	unsigned long flags;
+	int rc;
 
 	/* If I get here, I've committed to send, so I complete the tx with
 	 * failure on any problems */
@@ -1456,20 +1454,20 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 int
 kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 {
-	lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
-	int	       type = lntmsg->msg_type;
+	lnet_hdr_t *hdr = &lntmsg->msg_hdr;
+	int type = lntmsg->msg_type;
 	lnet_process_id_t target = lntmsg->msg_target;
-	int	       target_is_router = lntmsg->msg_target_is_router;
-	int	       routing = lntmsg->msg_routing;
-	unsigned int      payload_niov = lntmsg->msg_niov;
-	struct kvec      *payload_iov = lntmsg->msg_iov;
-	lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
-	unsigned int      payload_offset = lntmsg->msg_offset;
-	unsigned int      payload_nob = lntmsg->msg_len;
-	kib_msg_t	*ibmsg;
-	kib_tx_t	 *tx;
-	int	       nob;
-	int	       rc;
+	int target_is_router = lntmsg->msg_target_is_router;
+	int routing = lntmsg->msg_routing;
+	unsigned int payload_niov = lntmsg->msg_niov;
+	struct kvec *payload_iov = lntmsg->msg_iov;
+	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+	unsigned int payload_offset = lntmsg->msg_offset;
+	unsigned int payload_nob = lntmsg->msg_len;
+	kib_msg_t *ibmsg;
+	kib_tx_t *tx;
+	int nob;
+	int rc;
 
 	/* NB 'private' is different depending on what we're sending.... */
 
@@ -1628,13 +1626,13 @@ static void
 kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
 {
 	lnet_process_id_t target = lntmsg->msg_target;
-	unsigned int      niov = lntmsg->msg_niov;
-	struct kvec      *iov = lntmsg->msg_iov;
-	lnet_kiov_t      *kiov = lntmsg->msg_kiov;
-	unsigned int      offset = lntmsg->msg_offset;
-	unsigned int      nob = lntmsg->msg_len;
-	kib_tx_t	 *tx;
-	int	       rc;
+	unsigned int niov = lntmsg->msg_niov;
+	struct kvec *iov = lntmsg->msg_iov;
+	lnet_kiov_t *kiov = lntmsg->msg_kiov;
+	unsigned int offset = lntmsg->msg_offset;
+	unsigned int nob = lntmsg->msg_len;
+	kib_tx_t *tx;
+	int rc;
 
 	tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
 	if (tx == NULL) {
@@ -1691,14 +1689,14 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 	     unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
 	     unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-	kib_rx_t    *rx = private;
-	kib_msg_t   *rxmsg = rx->rx_msg;
-	kib_conn_t  *conn = rx->rx_conn;
-	kib_tx_t    *tx;
-	kib_msg_t   *txmsg;
-	int	  nob;
-	int	  post_credit = IBLND_POSTRX_PEER_CREDIT;
-	int	  rc = 0;
+	kib_rx_t *rx = private;
+	kib_msg_t *rxmsg = rx->rx_msg;
+	kib_conn_t *conn = rx->rx_conn;
+	kib_tx_t *tx;
+	kib_msg_t *txmsg;
+	int nob;
+	int post_credit = IBLND_POSTRX_PEER_CREDIT;
+	int rc = 0;
 
 	LASSERT(mlen <= rlen);
 	LASSERT(!in_interrupt());
@@ -1828,8 +1826,8 @@ kiblnd_peer_alive(kib_peer_t *peer)
 static void
 kiblnd_peer_notify(kib_peer_t *peer)
 {
-	int	   error = 0;
-	unsigned long    last_alive = 0;
+	int error = 0;
+	unsigned long last_alive = 0;
 	unsigned long flags;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -1860,9 +1858,9 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
 	 * connection to be finished off by the connd.  Otherwise the connd is
 	 * already dealing with it (either to set it up or tear it down).
 	 * Caller holds kib_global_lock exclusively in irq context */
-	kib_peer_t       *peer = conn->ibc_peer;
-	kib_dev_t	*dev;
-	unsigned long     flags;
+	kib_peer_t *peer = conn->ibc_peer;
+	kib_dev_t *dev;
+	unsigned long flags;
 
 	LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -1934,8 +1932,8 @@ kiblnd_close_conn(kib_conn_t *conn, int error)
 static void
 kiblnd_handle_early_rxs(kib_conn_t *conn)
 {
-	unsigned long    flags;
-	kib_rx_t	*rx;
+	unsigned long flags;
+	kib_rx_t *rx;
 	kib_rx_t *tmp;
 
 	LASSERT(!in_interrupt());
@@ -1957,9 +1955,9 @@ static void
 kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
 {
 	LIST_HEAD(zombies);
-	struct list_head	  *tmp;
-	struct list_head	  *nxt;
-	kib_tx_t	    *tx;
+	struct list_head *tmp;
+	struct list_head *nxt;
+	kib_tx_t *tx;
 
 	spin_lock(&conn->ibc_lock);
 
@@ -2018,7 +2016,7 @@ void
 kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
 {
 	LIST_HEAD(zombies);
-	unsigned long     flags;
+	unsigned long flags;
 
 	LASSERT(error != 0);
 	LASSERT(!in_interrupt());
@@ -2071,12 +2069,12 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
 void
 kiblnd_connreq_done(kib_conn_t *conn, int status)
 {
-	kib_peer_t	*peer = conn->ibc_peer;
-	kib_tx_t	  *tx;
+	kib_peer_t *peer = conn->ibc_peer;
+	kib_tx_t *tx;
 	kib_tx_t *tmp;
-	struct list_head	 txs;
-	unsigned long      flags;
-	int		active;
+	struct list_head txs;
+	unsigned long flags;
+	int active;
 
 	active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 
@@ -2166,7 +2164,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
 static void
 kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 {
-	int	  rc;
+	int rc;
 
 	rc = rdma_reject(cmid, rej, sizeof(*rej));
 
@@ -2177,22 +2175,22 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 static int
 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
-	rwlock_t		*g_lock = &kiblnd_data.kib_global_lock;
-	kib_msg_t	     *reqmsg = priv;
-	kib_msg_t	     *ackmsg;
-	kib_dev_t	     *ibdev;
-	kib_peer_t	    *peer;
-	kib_peer_t	    *peer2;
-	kib_conn_t	    *conn;
-	lnet_ni_t	     *ni  = NULL;
-	kib_net_t	     *net = NULL;
-	lnet_nid_t	     nid;
+	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+	kib_msg_t *reqmsg = priv;
+	kib_msg_t *ackmsg;
+	kib_dev_t *ibdev;
+	kib_peer_t *peer;
+	kib_peer_t *peer2;
+	kib_conn_t *conn;
+	lnet_ni_t *ni  = NULL;
+	kib_net_t *net = NULL;
+	lnet_nid_t nid;
 	struct rdma_conn_param cp;
-	kib_rej_t	      rej;
-	int		    version = IBLND_MSG_VERSION;
-	unsigned long	  flags;
-	int		    rc;
-	struct sockaddr_in    *peer_addr;
+	kib_rej_t rej;
+	int version = IBLND_MSG_VERSION;
+	unsigned long flags;
+	int rc;
+	struct sockaddr_in *peer_addr;
 	LASSERT(!in_interrupt());
 
 	/* cmid inherits 'context' from the corresponding listener id */
@@ -2200,8 +2198,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	LASSERT(ibdev != NULL);
 
 	memset(&rej, 0, sizeof(rej));
-	rej.ibr_magic		= IBLND_MSG_MAGIC;
-	rej.ibr_why		  = IBLND_REJECT_FATAL;
+	rej.ibr_magic = IBLND_MSG_MAGIC;
+	rej.ibr_why = IBLND_REJECT_FATAL;
 	rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
 
 	peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
@@ -2243,7 +2241,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	}
 
 	nid = reqmsg->ibm_srcnid;
-	ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
+	ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
 
 	if (ni != NULL) {
 		net = (kib_net_t *)ni->ni_data;
@@ -2394,7 +2392,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	 * CM callback doesn't destroy cmid. */
 
 	conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
-	conn->ibc_credits	  = IBLND_MSG_QUEUE_SIZE(version);
+	conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
 	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
 		 <= IBLND_RX_MSGS(version));
@@ -2412,12 +2410,12 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
 	memset(&cp, 0, sizeof(cp));
 	cp.private_data	= ackmsg;
-	cp.private_data_len    = ackmsg->ibm_nob;
+	cp.private_data_len = ackmsg->ibm_nob;
 	cp.responder_resources = 0;	     /* No atomic ops or RDMA reads */
-	cp.initiator_depth     = 0;
+	cp.initiator_depth = 0;
 	cp.flow_control	= 1;
-	cp.retry_count	 = *kiblnd_tunables.kib_retry_count;
-	cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
+	cp.retry_count = *kiblnd_tunables.kib_retry_count;
+	cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
 
 	CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
 
@@ -2439,7 +2437,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	if (ni != NULL)
 		lnet_ni_decref(ni);
 
-	rej.ibr_version = version;
+	rej.ibr_version             = version;
 	rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
 	rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
 	kiblnd_reject(cmid, &rej);
@@ -2451,10 +2449,10 @@ static void
 kiblnd_reconnect(kib_conn_t *conn, int version,
 		  __u64 incarnation, int why, kib_connparams_t *cp)
 {
-	kib_peer_t    *peer = conn->ibc_peer;
-	char	  *reason;
-	int	    retry = 0;
-	unsigned long  flags;
+	kib_peer_t *peer = conn->ibc_peer;
+	char *reason;
+	int retry = 0;
+	unsigned long flags;
 
 	LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 	LASSERT(peer->ibp_connecting > 0);     /* 'conn' at least */
@@ -2513,7 +2511,7 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
 static void
 kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 {
-	kib_peer_t    *peer = conn->ibc_peer;
+	kib_peer_t *peer = conn->ibc_peer;
 
 	LASSERT(!in_interrupt());
 	LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
@@ -2532,10 +2530,10 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 
 	case IB_CM_REJ_CONSUMER_DEFINED:
 		if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
-			kib_rej_t	*rej	 = priv;
-			kib_connparams_t *cp	  = NULL;
-			int	       flip	= 0;
-			__u64	     incarnation = -1;
+			kib_rej_t *rej = priv;
+			kib_connparams_t *cp = NULL;
+			int flip = 0;
+			__u64 incarnation = -1;
 
 			/* NB. default incarnation is -1 because:
 			 * a) V1 will ignore dst incarnation in connreq.
@@ -2652,13 +2650,13 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 static void
 kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 {
-	kib_peer_t    *peer = conn->ibc_peer;
-	lnet_ni_t     *ni   = peer->ibp_ni;
-	kib_net_t     *net  = ni->ni_data;
-	kib_msg_t     *msg  = priv;
-	int	    ver  = conn->ibc_version;
-	int	    rc   = kiblnd_unpack_msg(msg, priv_nob);
-	unsigned long  flags;
+	kib_peer_t *peer = conn->ibc_peer;
+	lnet_ni_t *ni = peer->ibp_ni;
+	kib_net_t *net = ni->ni_data;
+	kib_msg_t *msg = priv;
+	int ver = conn->ibc_version;
+	int rc = kiblnd_unpack_msg(msg, priv_nob);
+	unsigned long flags;
 
 	LASSERT(net != NULL);
 
@@ -2726,8 +2724,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 		goto failed;
 	}
 
-	conn->ibc_incarnation      = msg->ibm_srcstamp;
-	conn->ibc_credits	   = IBLND_MSG_QUEUE_SIZE(ver);
+	conn->ibc_incarnation = msg->ibm_srcstamp;
+	conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(ver);
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
 	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
 		 <= IBLND_RX_MSGS(ver));
@@ -2749,20 +2747,20 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 static int
 kiblnd_active_connect(struct rdma_cm_id *cmid)
 {
-	kib_peer_t	      *peer = (kib_peer_t *)cmid->context;
-	kib_conn_t	      *conn;
-	kib_msg_t	       *msg;
-	struct rdma_conn_param   cp;
-	int		      version;
-	__u64		    incarnation;
-	unsigned long	    flags;
-	int		      rc;
+	kib_peer_t *peer = (kib_peer_t *)cmid->context;
+	kib_conn_t *conn;
+	kib_msg_t *msg;
+	struct rdma_conn_param cp;
+	int version;
+	__u64 incarnation;
+	unsigned long flags;
+	int rc;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	incarnation = peer->ibp_incarnation;
-	version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
-						 peer->ibp_version;
+	version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
+					     peer->ibp_version;
 
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
@@ -2793,8 +2791,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 	cp.private_data_len    = msg->ibm_nob;
 	cp.responder_resources = 0;	     /* No atomic ops or RDMA reads */
 	cp.initiator_depth     = 0;
-	cp.flow_control	= 1;
-	cp.retry_count	 = *kiblnd_tunables.kib_retry_count;
+	cp.flow_control        = 1;
+	cp.retry_count         = *kiblnd_tunables.kib_retry_count;
 	cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
 
 	LASSERT(cmid->context == (void *)conn);
@@ -2814,9 +2812,9 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 int
 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 {
-	kib_peer_t  *peer;
-	kib_conn_t  *conn;
-	int	  rc;
+	kib_peer_t *peer;
+	kib_conn_t *conn;
+	int rc;
 
 	switch (event->event) {
 	default:
@@ -2983,8 +2981,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 static int
 kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
 {
-	kib_tx_t	  *tx;
-	struct list_head	*ttmp;
+	kib_tx_t *tx;
+	struct list_head *ttmp;
 
 	list_for_each(ttmp, txs) {
 		tx = list_entry(ttmp, kib_tx_t, tx_list);
@@ -3022,13 +3020,13 @@ kiblnd_check_conns(int idx)
 {
 	LIST_HEAD(closes);
 	LIST_HEAD(checksends);
-	struct list_head    *peers = &kiblnd_data.kib_peers[idx];
-	struct list_head    *ptmp;
-	kib_peer_t    *peer;
-	kib_conn_t    *conn;
+	struct list_head *peers = &kiblnd_data.kib_peers[idx];
+	struct list_head *ptmp;
+	kib_peer_t *peer;
+	kib_conn_t *conn;
 	kib_conn_t *tmp;
-	struct list_head    *ctmp;
-	unsigned long  flags;
+	struct list_head *ctmp;
+	unsigned long flags;
 
 	/* NB. We expect to have a look at all the peers and not find any
 	 * RDMAs to time out, so we just use a shared lock while we
@@ -3114,14 +3112,14 @@ kiblnd_disconnect_conn(kib_conn_t *conn)
 int
 kiblnd_connd(void *arg)
 {
-	wait_queue_t     wait;
-	unsigned long      flags;
-	kib_conn_t	*conn;
-	int		timeout;
-	int		i;
-	int		dropped_lock;
-	int		peer_index = 0;
-	unsigned long      deadline = jiffies;
+	wait_queue_t wait;
+	unsigned long flags;
+	kib_conn_t *conn;
+	int timeout;
+	int i;
+	int dropped_lock;
+	int peer_index = 0;
+	unsigned long deadline = jiffies;
 
 	cfs_block_allsigs();
 
@@ -3169,7 +3167,7 @@ kiblnd_connd(void *arg)
 		if (timeout <= 0) {
 			const int n = 4;
 			const int p = 1;
-			int       chunk = kiblnd_data.kib_peer_hash_size;
+			int chunk = kiblnd_data.kib_peer_hash_size;
 
 			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 			dropped_lock = 1;
@@ -3273,9 +3271,9 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
 	 * consuming my CQ I could be called after all completions have
 	 * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
 	 * and this CQ is about to be destroyed so I NOOP. */
-	kib_conn_t		*conn = (kib_conn_t *)arg;
-	struct kib_sched_info	*sched = conn->ibc_sched;
-	unsigned long		flags;
+	kib_conn_t *conn = (kib_conn_t *)arg;
+	struct kib_sched_info *sched = conn->ibc_sched;
+	unsigned long flags;
 
 	LASSERT(cq == conn->ibc_cq);
 
@@ -3309,15 +3307,15 @@ kiblnd_cq_event(struct ib_event *event, void *arg)
 int
 kiblnd_scheduler(void *arg)
 {
-	long			id = (long)arg;
-	struct kib_sched_info	*sched;
-	kib_conn_t		*conn;
-	wait_queue_t		wait;
-	unsigned long		flags;
-	struct ib_wc		wc;
-	int			did_something;
-	int			busy_loops = 0;
-	int			rc;
+	long id = (long)arg;
+	struct kib_sched_info *sched;
+	kib_conn_t *conn;
+	wait_queue_t wait;
+	unsigned long flags;
+	struct ib_wc wc;
+	int did_something;
+	int busy_loops = 0;
+	int rc;
 
 	cfs_block_allsigs();
 
@@ -3432,11 +3430,11 @@ kiblnd_scheduler(void *arg)
 int
 kiblnd_failover_thread(void *arg)
 {
-	rwlock_t		*glock = &kiblnd_data.kib_global_lock;
-	kib_dev_t	 *dev;
-	wait_queue_t     wait;
-	unsigned long      flags;
-	int		rc;
+	rwlock_t *glock = &kiblnd_data.kib_global_lock;
+	kib_dev_t *dev;
+	wait_queue_t wait;
+	unsigned long flags;
+	int rc;
 
 	LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
 
@@ -3446,8 +3444,8 @@ kiblnd_failover_thread(void *arg)
 	write_lock_irqsave(glock, flags);
 
 	while (!kiblnd_data.kib_shutdown) {
-		int     do_failover = 0;
-		int     long_sleep;
+		int do_failover = 0;
+		int long_sleep;
 
 		list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
 				    ibd_fail_list) {
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index eedf01a..b0e0036 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -150,30 +150,30 @@ module_param(use_privileged_port, int, 0644);
 MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
 
 kib_tunables_t kiblnd_tunables = {
-	.kib_dev_failover	   = &dev_failover,
-	.kib_service		= &service,
-	.kib_cksum		  = &cksum,
-	.kib_timeout		= &timeout,
-	.kib_keepalive	      = &keepalive,
-	.kib_ntx		    = &ntx,
-	.kib_credits		= &credits,
-	.kib_peertxcredits	  = &peer_credits,
-	.kib_peercredits_hiw	= &peer_credits_hiw,
-	.kib_peerrtrcredits	 = &peer_buffer_credits,
-	.kib_peertimeout	    = &peer_timeout,
-	.kib_default_ipif	   = &ipif_name,
-	.kib_retry_count	    = &retry_count,
-	.kib_rnr_retry_count	= &rnr_retry_count,
-	.kib_concurrent_sends       = &concurrent_sends,
-	.kib_ib_mtu		 = &ib_mtu,
-	.kib_map_on_demand	  = &map_on_demand,
-	.kib_fmr_pool_size	  = &fmr_pool_size,
-	.kib_fmr_flush_trigger      = &fmr_flush_trigger,
-	.kib_fmr_cache	      = &fmr_cache,
-	.kib_pmr_pool_size	  = &pmr_pool_size,
-	.kib_require_priv_port      = &require_privileged_port,
-	.kib_use_priv_port	    = &use_privileged_port,
-	.kib_nscheds		    = &nscheds
+	.kib_dev_failover      = &dev_failover,
+	.kib_service           = &service,
+	.kib_cksum             = &cksum,
+	.kib_timeout           = &timeout,
+	.kib_keepalive         = &keepalive,
+	.kib_ntx               = &ntx,
+	.kib_credits           = &credits,
+	.kib_peertxcredits     = &peer_credits,
+	.kib_peercredits_hiw   = &peer_credits_hiw,
+	.kib_peerrtrcredits    = &peer_buffer_credits,
+	.kib_peertimeout       = &peer_timeout,
+	.kib_default_ipif      = &ipif_name,
+	.kib_retry_count       = &retry_count,
+	.kib_rnr_retry_count   = &rnr_retry_count,
+	.kib_concurrent_sends  = &concurrent_sends,
+	.kib_ib_mtu            = &ib_mtu,
+	.kib_map_on_demand     = &map_on_demand,
+	.kib_fmr_pool_size     = &fmr_pool_size,
+	.kib_fmr_flush_trigger = &fmr_flush_trigger,
+	.kib_fmr_cache         = &fmr_cache,
+	.kib_pmr_pool_size     = &pmr_pool_size,
+	.kib_require_priv_port = &require_privileged_port,
+	.kib_use_priv_port     = &use_privileged_port,
+	.kib_nscheds           = &nscheds
 };
 
 int
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (4 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue Mike Shuey
                   ` (16 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

LNET_MUTEX_LOCK and LNET_MUTEX_UNLOCK are verbose wrappers to mutex_lock and
mutex_unlock.  Get rid of these.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/include/linux/lnet/lib-lnet.h   |    2 -
 drivers/staging/lustre/lnet/lnet/api-ni.c          |   26 ++++++++++----------
 drivers/staging/lustre/lnet/lnet/module.c          |   12 ++++----
 3 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 0038d29..d84aa9a 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -172,8 +172,6 @@ lnet_net_lock_current(void)
 #define lnet_eq_wait_unlock()	spin_unlock(&the_lnet.ln_eq_wait_lock)
 #define lnet_ni_lock(ni)	spin_lock(&(ni)->ni_lock)
 #define lnet_ni_unlock(ni)	spin_unlock(&(ni)->ni_lock)
-#define LNET_MUTEX_LOCK(m)	mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m)	mutex_unlock(m)
 
 #define MAX_PORTALS     64
 
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 1adc481..2230eb0 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -282,7 +282,7 @@ lnet_find_lnd_by_type(int type)
 void
 lnet_register_lnd(lnd_t *lnd)
 {
-	LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+	mutex_lock(&the_lnet.ln_lnd_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
@@ -293,14 +293,14 @@ lnet_register_lnd(lnd_t *lnd)
 
 	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
 
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+	mutex_unlock(&the_lnet.ln_lnd_mutex);
 }
 EXPORT_SYMBOL(lnet_register_lnd);
 
 void
 lnet_unregister_lnd(lnd_t *lnd)
 {
-	LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+	mutex_lock(&the_lnet.ln_lnd_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
@@ -309,7 +309,7 @@ lnet_unregister_lnd(lnd_t *lnd)
 	list_del(&lnd->lnd_list);
 	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
 
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+	mutex_unlock(&the_lnet.ln_lnd_mutex);
 }
 EXPORT_SYMBOL(lnet_unregister_lnd);
 
@@ -1055,18 +1055,18 @@ lnet_startup_lndnis(void)
 			goto failed;
 		}
 
-		LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+		mutex_lock(&the_lnet.ln_lnd_mutex);
 		lnd = lnet_find_lnd_by_type(lnd_type);
 
 		if (lnd == NULL) {
-			LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+			mutex_unlock(&the_lnet.ln_lnd_mutex);
 			rc = request_module("%s",
 						libcfs_lnd2modname(lnd_type));
-			LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+			mutex_lock(&the_lnet.ln_lnd_mutex);
 
 			lnd = lnet_find_lnd_by_type(lnd_type);
 			if (lnd == NULL) {
-				LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+				mutex_unlock(&the_lnet.ln_lnd_mutex);
 				CERROR("Can't load LND %s, module %s, rc=%d\n",
 				       libcfs_lnd2str(lnd_type),
 				       libcfs_lnd2modname(lnd_type), rc);
@@ -1082,7 +1082,7 @@ lnet_startup_lndnis(void)
 
 		rc = (lnd->lnd_startup)(ni);
 
-		LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+		mutex_unlock(&the_lnet.ln_lnd_mutex);
 
 		if (rc != 0) {
 			LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
@@ -1272,7 +1272,7 @@ LNetNIInit(lnet_pid_t requested_pid)
 	int im_a_router = 0;
 	int rc;
 
-	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+	mutex_lock(&the_lnet.ln_api_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
@@ -1343,7 +1343,7 @@ LNetNIInit(lnet_pid_t requested_pid)
  failed0:
 	LASSERT(rc < 0);
  out:
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+	mutex_unlock(&the_lnet.ln_api_mutex);
 	return rc;
 }
 EXPORT_SYMBOL(LNetNIInit);
@@ -1360,7 +1360,7 @@ EXPORT_SYMBOL(LNetNIInit);
 int
 LNetNIFini(void)
 {
-	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+	mutex_lock(&the_lnet.ln_api_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -1383,7 +1383,7 @@ LNetNIFini(void)
 		lnet_unprepare();
 	}
 
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+	mutex_unlock(&the_lnet.ln_api_mutex);
 	return 0;
 }
 EXPORT_SYMBOL(LNetNIFini);
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index f73d644..6881b9c 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -49,7 +49,7 @@ lnet_configure(void *arg)
 	/* 'arg' only there so I can be passed to cfs_create_thread() */
 	int rc = 0;
 
-	LNET_MUTEX_LOCK(&lnet_config_mutex);
+	mutex_lock(&lnet_config_mutex);
 
 	if (!the_lnet.ln_niinit_self) {
 		rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
@@ -59,7 +59,7 @@ lnet_configure(void *arg)
 		}
 	}
 
-	LNET_MUTEX_UNLOCK(&lnet_config_mutex);
+	mutex_unlock(&lnet_config_mutex);
 	return rc;
 }
 
@@ -68,18 +68,18 @@ lnet_unconfigure(void)
 {
 	int refcount;
 
-	LNET_MUTEX_LOCK(&lnet_config_mutex);
+	mutex_lock(&lnet_config_mutex);
 
 	if (the_lnet.ln_niinit_self) {
 		the_lnet.ln_niinit_self = 0;
 		LNetNIFini();
 	}
 
-	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+	mutex_lock(&the_lnet.ln_api_mutex);
 	refcount = the_lnet.ln_refcount;
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+	mutex_unlock(&the_lnet.ln_api_mutex);
 
-	LNET_MUTEX_UNLOCK(&lnet_config_mutex);
+	mutex_unlock(&lnet_config_mutex);
 	return (refcount == 0) ? 0 : -EBUSY;
 }
 
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (5 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes Mike Shuey
                   ` (15 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

Some debug code was left in the tree, blocked by a #if 0.  Also kicked out
checkpatch.pl warnings.  Remove it.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/lib-msg.c |   17 -----------------
 1 files changed, 0 insertions(+), 17 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 65d7595..55e6638 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -444,23 +444,6 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
 
 	if (msg == NULL)
 		return;
-#if 0
-	CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
-	       lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
-	       msg->msg_target_is_router ? "t" : "",
-	       msg->msg_routing ? "X" : "",
-	       msg->msg_ack ? "A" : "",
-	       msg->msg_sending ? "S" : "",
-	       msg->msg_receiving ? "R" : "",
-	       msg->msg_delayed ? "d" : "",
-	       msg->msg_txcredit ? "C" : "",
-	       msg->msg_peertxcredit ? "c" : "",
-	       msg->msg_rtrcredit ? "F" : "",
-	       msg->msg_peerrtrcredit ? "f" : "",
-	       msg->msg_onactivelist ? "!" : "",
-	       msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
-	       msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
-#endif
 	msg->msg_ev.status = status;
 
 	if (msg->msg_md != NULL) {
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (6 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 21:00   ` Joe Perches
  2015-05-21 19:50 ` [PATCH v4 11/13] staging: lnet: o2iblnd: checkpatch.pl fixes Mike Shuey
                   ` (14 subsequent siblings)
  22 siblings, 1 reply; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Fix many checkpatch.pl warnings.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/acceptor.c    |   69 ++++++++++++++---------
 drivers/staging/lustre/lnet/lnet/api-ni.c      |   18 ++++--
 drivers/staging/lustre/lnet/lnet/config.c      |    2 +-
 drivers/staging/lustre/lnet/lnet/lib-eq.c      |    4 +-
 drivers/staging/lustre/lnet/lnet/lib-md.c      |    4 +-
 drivers/staging/lustre/lnet/lnet/lib-ptl.c     |    9 ++-
 drivers/staging/lustre/lnet/lnet/router.c      |   17 ++++--
 drivers/staging/lustre/lnet/lnet/router_proc.c |   16 ++++--
 8 files changed, 88 insertions(+), 51 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 69d4b19..21af335 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -99,38 +99,42 @@ lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
 	switch (rc) {
 	/* "normal" errors */
 	case -ECONNREFUSED:
-		CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
-			libcfs_nid2str(peer_nid),
-			&peer_ip, peer_port);
+		CNETERR(
+			"Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
+			libcfs_nid2str(peer_nid), &peer_ip, peer_port);
 		break;
 	case -EHOSTUNREACH:
 	case -ENETUNREACH:
-		CNETERR("Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n",
+		CNETERR(
+			"Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n",
 			libcfs_nid2str(peer_nid), &peer_ip);
 		break;
 	case -ETIMEDOUT:
-		CNETERR("Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n",
-			libcfs_nid2str(peer_nid),
-			&peer_ip, peer_port);
+		CNETERR(
+			"Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n",
+			libcfs_nid2str(peer_nid), &peer_ip, peer_port);
 		break;
 	case -ECONNRESET:
-		LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n",
-				   libcfs_nid2str(peer_nid),
-				   &peer_ip, peer_port,
-				   libcfs_nid2str(peer_nid));
+		LCONSOLE_ERROR_MSG(0x11b,
+				   "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n",
+				   libcfs_nid2str(peer_nid), &peer_ip,
+				   peer_port, libcfs_nid2str(peer_nid));
 		break;
 	case -EPROTO:
-		LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n",
-				   libcfs_nid2str(peer_nid),
-				   &peer_ip, peer_port);
+		LCONSOLE_ERROR_MSG(0x11c,
+				   "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n",
+				   libcfs_nid2str(peer_nid), &peer_ip,
+				   peer_port);
 		break;
 	case -EADDRINUSE:
-		LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to connect to %s at host %pI4h on port %d\n",
+		LCONSOLE_ERROR_MSG(0x11d,
+				   "No privileged ports available to connect to %s at host %pI4h on port %d\n",
 				   libcfs_nid2str(peer_nid),
 				   &peer_ip, peer_port);
 		break;
 	default:
-		LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s at host %pI4h on port %d\n",
+		LCONSOLE_ERROR_MSG(0x11e,
+				   "Unexpected error %d connecting to %s at host %pI4h on port %d\n",
 				   rc, libcfs_nid2str(peer_nid),
 				   &peer_ip, peer_port);
 		break;
@@ -238,7 +242,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 					       accept_timeout);
 
 			if (rc != 0)
-				CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
+				CERROR(
+				       "Error sending magic+version in response to LNET magic from %pI4h: %d\n",
 				       &peer_ip, rc);
 			return -EPROTO;
 		}
@@ -250,7 +255,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 		else
 			str = "unrecognised";
 
-		LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
+		LCONSOLE_ERROR_MSG(0x11f,
+				   "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
 				   &peer_ip, magic, str);
 		return -EPROTO;
 	}
@@ -261,7 +267,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 			      sizeof(cr.acr_version),
 			      accept_timeout);
 	if (rc != 0) {
-		CERROR("Error %d reading connection request version from %pI4h\n",
+		CERROR(
+			"Error %d reading connection request version from %pI4h\n",
 			rc, &peer_ip);
 		return -EIO;
 	}
@@ -284,7 +291,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 				       accept_timeout);
 
 		if (rc != 0)
-			CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
+			CERROR(
+			       "Error sending magic+version in response to version %d from %pI4h: %d\n",
 			       peer_version, &peer_ip, rc);
 		return -EPROTO;
 	}
@@ -294,7 +302,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 			      offsetof(lnet_acceptor_connreq_t, acr_nid),
 			      accept_timeout);
 	if (rc != 0) {
-		CERROR("Error %d reading connection request from %pI4h\n",
+		CERROR(
+			"Error %d reading connection request from %pI4h\n",
 			rc, &peer_ip);
 		return -EIO;
 	}
@@ -307,7 +316,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 	    ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
 		if (ni != NULL)
 			lnet_ni_decref(ni);
-		LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
+		LCONSOLE_ERROR_MSG(0x120,
+				   "Refusing connection from %pI4h for %s: No matching NI\n",
 				   &peer_ip, libcfs_nid2str(cr.acr_nid));
 		return -EPERM;
 	}
@@ -315,7 +325,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 	if (ni->ni_lnd->lnd_accept == NULL) {
 		/* This catches a request for the loopback LND */
 		lnet_ni_decref(ni);
-		LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n",
+		LCONSOLE_ERROR_MSG(0x121,
+				  "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n",
 				  &peer_ip, libcfs_nid2str(cr.acr_nid));
 		return -EPERM;
 	}
@@ -347,10 +358,12 @@ lnet_acceptor(void *arg)
 				0, accept_port, accept_backlog);
 	if (rc != 0) {
 		if (rc == -EADDRINUSE)
-			LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
+			LCONSOLE_ERROR_MSG(0x122,
+					   "Can't start acceptor on port %d: port already in use\n",
 					   accept_port);
 		else
-			LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port %d: unexpected error %d\n",
+			LCONSOLE_ERROR_MSG(0x123,
+					   "Can't start acceptor on port %d: unexpected error %d\n",
 					   accept_port, rc);
 
 		lnet_acceptor_state.pta_sock = NULL;
@@ -390,7 +403,8 @@ lnet_acceptor(void *arg)
 		}
 
 		if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
-			CERROR("Refusing connection from %pI4h: insecure port %d\n",
+			CERROR(
+			       "Refusing connection from %pI4h: insecure port %d\n",
 			       &peer_ip, peer_port);
 			goto failed;
 		}
@@ -398,7 +412,8 @@ lnet_acceptor(void *arg)
 		rc = libcfs_sock_read(newsock, &magic, sizeof(magic),
 				      accept_timeout);
 		if (rc != 0) {
-			CERROR("Error %d reading connection request from %pI4h\n",
+			CERROR(
+				"Error %d reading connection request from %pI4h\n",
 				rc, &peer_ip);
 			goto failed;
 		}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 2230eb0..6af8a5f 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -74,7 +74,8 @@ lnet_get_networks(void)
 	int rc;
 
 	if (*networks != 0 && *ip2nets != 0) {
-		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
+		LCONSOLE_ERROR_MSG(0x101,
+				   "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
 		return NULL;
 	}
 
@@ -189,7 +190,8 @@ static void lnet_assert_wire_constants(void)
 	/* Checks for struct ptl_handle_wire_t */
 	CLASSERT((int)sizeof(lnet_handle_wire_t) == 16);
 	CLASSERT((int)offsetof(lnet_handle_wire_t, wh_interface_cookie) == 0);
-	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) == 8);
+	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) ==
+		8);
 	CLASSERT((int)offsetof(lnet_handle_wire_t, wh_object_cookie) == 8);
 	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_object_cookie) == 8);
 
@@ -1085,7 +1087,8 @@ lnet_startup_lndnis(void)
 		mutex_unlock(&the_lnet.ln_lnd_mutex);
 
 		if (rc != 0) {
-			LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
+			LCONSOLE_ERROR_MSG(0x105,
+					   "Error %d starting up LNI %s\n",
 					   rc, libcfs_lnd2str(lnd->lnd_type));
 			lnet_net_lock(LNET_LOCK_EX);
 			lnd->lnd_refcount--;
@@ -1141,7 +1144,8 @@ lnet_startup_lndnis(void)
 
 	if (the_lnet.ln_eq_waitni != NULL && nicount > 1) {
 		lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
-		LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network\n",
+		LCONSOLE_ERROR_MSG(0x109,
+				   "LND %s can only run single-network\n",
 				   libcfs_lnd2str(lnd_type));
 		goto failed;
 	}
@@ -1187,7 +1191,8 @@ LNetInit(void)
 	LASSERT(the_lnet.ln_cpt_number > 0);
 	if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
 		/* we are under risk of consuming all lh_cookie */
-		CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
+		CERROR(
+		       "Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
 		       the_lnet.ln_cpt_number, LNET_CPT_MAX);
 		return -1;
 	}
@@ -1749,7 +1754,8 @@ lnet_ping_target_fini(void)
 }
 
 int
-lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
+lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids,
+	  int n_ids)
 {
 	lnet_handle_eq_t eqh;
 	lnet_handle_md_t mdh;
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 70bc809..8359480 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 368bde9..0c01834 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -82,7 +82,9 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
 	count = cfs_power2_roundup(count);
 
 	if (callback != LNET_EQ_HANDLER_NONE && count != 0)
-		CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
+		CWARN(
+		      "EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n",
+		      count);
 
 	/* count can be 0 if only need callback, we can eliminate
 	 * overhead of enqueue event */
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 5856c30..04b1955 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -144,7 +144,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 
 		lmd->md_length = total_length;
 
-		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+		if ((umd->options & LNET_MD_MAX_SIZE) != 0 &&/* max size used */
 		    (umd->max_size < 0 ||
 		     umd->max_size > total_length)) /* illegal max_size */
 			return -EINVAL;
@@ -154,7 +154,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 		lmd->md_iov.iov[0].iov_base = umd->start;
 		lmd->md_iov.iov[0].iov_len = umd->length;
 
-		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+		if ((umd->options & LNET_MD_MAX_SIZE) != 0 &&/* max size used */
 		    (umd->max_size < 0 ||
 		     umd->max_size > (int)umd->length)) /* illegal max_size */
 			return -EINVAL;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 84707c5..4735e9c 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -184,7 +184,8 @@ lnet_try_match_md(lnet_libmd_t *md,
 		mlength = info->mi_rlength;
 	} else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
 		/* this packet _really_ is too big */
-		CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
+		CERROR(
+		       "Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
 		       libcfs_id2str(info->mi_id), info->mi_mbits,
 		       info->mi_rlength, md->md_length - offset, mlength);
 
@@ -192,7 +193,8 @@ lnet_try_match_md(lnet_libmd_t *md,
 	}
 
 	/* Commit to this ME/MD */
-	CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
+	CDEBUG(D_NET,
+	       "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
 	       (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
 	       info->mi_portal, libcfs_id2str(info->mi_id), mlength,
 	       info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
@@ -685,7 +687,8 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 		if ((rc & LNET_MATCHMD_OK) != 0) {
 			list_add_tail(&msg->msg_list, matches);
 
-			CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
+			CDEBUG(D_NET,
+			       "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
 			       libcfs_id2str(info.mi_id),
 			       info.mi_portal, info.mi_mbits,
 			       info.mi_roffset, info.mi_rlength);
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 96886a2..1f5629e 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -456,7 +456,8 @@ lnet_check_routes(void)
 
 				lnet_net_unlock(cpt);
 
-				CERROR("Routes to %s via %s and %s not supported\n",
+				CERROR(
+				       "Routes to %s via %s and %s not supported\n",
 				       libcfs_net2str(net),
 				       libcfs_nid2str(nid1),
 				       libcfs_nid2str(nid2));
@@ -1020,7 +1021,8 @@ lnet_router_checker_start(void)
 
 	if (check_routers_before_use &&
 	    dead_router_check_interval <= 0) {
-		LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
+		LCONSOLE_ERROR_MSG(0x10a,
+				   "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
 		return -EINVAL;
 	}
 
@@ -1321,7 +1323,8 @@ lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
 		rb = lnet_new_rtrbuf(rbp, cpt);
 
 		if (rb == NULL) {
-			CERROR("Failed to allocate %d router bufs of %d pages\n",
+			CERROR(
+			       "Failed to allocate %d router bufs of %d pages\n",
 			       nbufs, rbp->rbp_npages);
 			return -ENOMEM;
 		}
@@ -1451,7 +1454,8 @@ lnet_rtrpools_alloc(int im_a_router)
 	} else if (!strcmp(forwarding, "enabled")) {
 		/* explicitly enabled */
 	} else {
-		LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
+		LCONSOLE_ERROR_MSG(0x10b,
+				   "'forwarding' not set to either 'enabled' or 'disabled'\n");
 		return -EINVAL;
 	}
 
@@ -1511,7 +1515,7 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
 	unsigned long now = cfs_time_current();
 	int cpt = lnet_cpt_of_nid(nid);
 
-	LASSERT(!in_interrupt ());
+	LASSERT(!in_interrupt());
 
 	CDEBUG(D_NET, "%s notifying %s: %s\n",
 		(ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
@@ -1528,7 +1532,8 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
 
 	/* can't do predictions... */
 	if (cfs_time_after(when, now)) {
-		CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
+		CWARN(
+		      "Ignoring prediction from %s of %s %s %ld seconds in the future\n",
 		      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
 		      libcfs_nid2str(nid), alive ? "up" : "down",
 		      cfs_duration_sec(cfs_time_sub(when, now)));
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index ee902dc..35f1d14 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -49,7 +49,8 @@ enum {
  */
 #define LNET_PROC_CPT_BITS	(LNET_CPT_BITS + 1)
 /* change version, 16 bits or 8 bits */
-#define LNET_PROC_VER_BITS	max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, 64) / 4, 8)
+#define LNET_PROC_VER_BITS	max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, \
+						    64) / 4, 8)
 
 #define LNET_PROC_HASH_BITS	LNET_PEER_HASH_BITS
 /*
@@ -396,7 +397,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 					      nrefs, nrtrrefs, alive_cnt,
 					      alive ? "up" : "down", last_ping,
 					      pingsent,
-					      cfs_duration_sec(cfs_time_sub(deadline, now)),
+					      cfs_duration_sec(
+						      cfs_time_sub(deadline,
+								   now)),
 					      down_ni, libcfs_nid2str(nid));
 			LASSERT(tmpstr + tmpsiz - s > 0);
 		}
@@ -791,17 +794,20 @@ static struct lnet_portal_rotors	portal_rotors[] = {
 	{
 		.pr_value = LNET_PTL_ROTOR_ON,
 		.pr_name  = "ON",
-		.pr_desc  = "round-robin dispatch all PUT messages for wildcard portals"
+		.pr_desc  =
+			"round-robin dispatch all PUT messages for wildcard portals"
 	},
 	{
 		.pr_value = LNET_PTL_ROTOR_RR_RT,
 		.pr_name  = "RR_RT",
-		.pr_desc  = "round-robin dispatch routed PUT message for wildcard portals"
+		.pr_desc  =
+			"round-robin dispatch routed PUT message for wildcard portals"
 	},
 	{
 		.pr_value = LNET_PTL_ROTOR_HASH_RT,
 		.pr_name  = "HASH_RT",
-		.pr_desc  = "dispatch routed PUT message by hashing source NID for wildcard portals"
+		.pr_desc  =
+			"dispatch routed PUT message by hashing source NID for wildcard portals"
 	},
 	{
 		.pr_value = -1,
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 11/13] staging: lnet: o2iblnd: checkpatch.pl fixes
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (7 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-22 10:49   ` Dan Carpenter
  2015-05-21 19:50 ` [PATCH v4 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups Mike Shuey
                   ` (13 subsequent siblings)
  22 siblings, 1 reply; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Clean up most checkpatch.pl warnings (primarily line length warnings).

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c    |   25 ++-
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h    |  204 +++++++++++---------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c |  184 +++++++++++-------
 .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c  |   55 ++++--
 4 files changed, 277 insertions(+), 191 deletions(-)

diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index a57c5c3..acfa465 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1166,10 +1166,11 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
 		rx->rx_conn = conn;
 		rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
 
-		rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
-						       rx->rx_msg,
-						       IBLND_MSG_SIZE,
-						       DMA_FROM_DEVICE);
+		rx->rx_msgaddr = kiblnd_dma_map_single(
+						conn->ibc_hdev->ibh_ibdev,
+						rx->rx_msg,
+						IBLND_MSG_SIZE,
+						DMA_FROM_DEVICE);
 		LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
 						   rx->rx_msgaddr));
 		KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
@@ -1387,7 +1388,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
 		.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
 		.page_shift        = PAGE_SHIFT,
 		.access            = (IB_ACCESS_LOCAL_WRITE |
-		                      IB_ACCESS_REMOTE_WRITE),
+				      IB_ACCESS_REMOTE_WRITE),
 		.pool_size         = fps->fps_pool_size,
 		.dirty_watermark   = fps->fps_flush_trigger,
 		.flush_function    = NULL,
@@ -1753,7 +1754,8 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
 	if (ps->ps_increasing) {
 		/* another thread is allocating a new pool */
 		spin_unlock(&ps->ps_lock);
-		CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
+		CDEBUG(D_NET,
+		       "Another thread is allocating new %s pool, waiting for her to complete\n",
 		       ps->ps_name);
 		schedule();
 		goto again;
@@ -2354,7 +2356,8 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 	if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
 		/* it's 4T..., assume we will re-code at that time */
-		CERROR("Can't support memory size: x%#llx with MR size: x%#llx\n",
+		CERROR(
+		       "Can't support memory size: x%#llx with MR size: x%#llx\n",
 		       mm_size, mr_size);
 		return -EINVAL;
 	}
@@ -2376,7 +2379,8 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 		mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
 		if (IS_ERR(mr)) {
-			CERROR("Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
+			CERROR(
+			       "Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
 			       ipb.addr, ipb.size, PTR_ERR(mr));
 			kiblnd_hdev_cleanup_mrs(hdev);
 			return PTR_ERR(mr);
@@ -2389,7 +2393,8 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 out:
 	if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
-		LCONSOLE_INFO("Register global MR array, MR size: %#llx, array size: %d\n",
+		LCONSOLE_INFO(
+			      "Register global MR array, MR size: %#llx, array size: %d\n",
 			      hdev->ibh_mr_size, hdev->ibh_nmrs);
 	return 0;
 }
@@ -2758,7 +2763,7 @@ void kiblnd_shutdown(lnet_ni_t *ni)
 		i = 2;
 		while (atomic_read(&net->ibn_npeers) != 0) {
 			i++;
-		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
+		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,/* 2**n? */
 			       "%s: waiting for %d peers to disconnect\n",
 			       libcfs_nid2str(ni->ni_nid),
 			       atomic_read(&net->ibn_npeers));
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 7f52c69..976665b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -48,7 +48,7 @@
 #include <linux/uio.h>
 #include <linux/uaccess.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/fs.h>
 #include <linux/file.h>
@@ -118,17 +118,23 @@ typedef struct {
 extern kib_tunables_t  kiblnd_tunables;
 
 #define IBLND_MSG_QUEUE_SIZE_V1   8 /* V1 only : # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
+#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return
+				     * credits */
 
 #define IBLND_CREDITS_DEFAULT     8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
 
-#define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
-				     IBLND_MSG_QUEUE_SIZE_V1 :   \
-				     *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
+/* Max # of peer credits */
+#define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1)
+
+/* # messages/RDMAs in-flight */
+#define IBLND_MSG_QUEUE_SIZE(v)   ((v) == IBLND_MSG_VERSION_1 ? \
+				   IBLND_MSG_QUEUE_SIZE_V1 :    \
+				   *kiblnd_tunables.kib_peertxcredits)
+
+/* when eagerly to return credits */
 #define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
-				     IBLND_CREDIT_HIGHWATER_V1 : \
-				     *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
+				    IBLND_CREDIT_HIGHWATER_V1 :  \
+				    *kiblnd_tunables.kib_peercredits_hiw)
 
 #define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
 
@@ -144,20 +150,23 @@ kiblnd_concurrent_sends_v1(void)
 	return *kiblnd_tunables.kib_concurrent_sends;
 }
 
-#define IBLND_CONCURRENT_SENDS(v)  ((v) == IBLND_MSG_VERSION_1 ? \
-				     kiblnd_concurrent_sends_v1() : \
-				     *kiblnd_tunables.kib_concurrent_sends)
+#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ?           \
+				   kiblnd_concurrent_sends_v1() :         \
+				   *kiblnd_tunables.kib_concurrent_sends)
 /* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
 #define IBLND_OOB_CAPABLE(v)       ((v) != IBLND_MSG_VERSION_1)
 #define IBLND_OOB_MSGS(v)	   (IBLND_OOB_CAPABLE(v) ? 2 : 0)
 
-#define IBLND_MSG_SIZE	      (4<<10)		 /* max size of queued messages (inc hdr) */
-#define IBLND_MAX_RDMA_FRAGS	 LNET_MAX_IOV	   /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand != 0 ? \
-				    *kiblnd_tunables.kib_map_on_demand :      \
-				     IBLND_MAX_RDMA_FRAGS)  /* max # of fragments configured by user */
+#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
+#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
+
+/* max # of fragments configured by user */
+#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+			      *kiblnd_tunables.kib_map_on_demand :      \
+			      IBLND_MAX_RDMA_FRAGS)
 #define IBLND_RDMA_FRAGS(v)	((v) == IBLND_MSG_VERSION_1 ? \
-				     IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
+				     IBLND_MAX_RDMA_FRAGS :   \
+				     IBLND_CFG_RDMA_FRAGS)
 
 /************************/
 /* derived constants... */
@@ -172,14 +181,16 @@ kiblnd_concurrent_sends_v1(void)
 #define IBLND_TX_MSGS()	    (*kiblnd_tunables.kib_ntx)
 
 /* RX messages (per connection) */
-#define IBLND_RX_MSGS(v)	    (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
-#define IBLND_RX_MSG_BYTES(v)       (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES(v)      ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
+#define IBLND_RX_MSGS(v)      (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
+#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / \
+			       PAGE_SIZE)
 
 /* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS(v)	    IBLND_RX_MSGS(v)
-#define IBLND_SEND_WRS(v)	  ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
-#define IBLND_CQ_ENTRIES(v)	 (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
+#define IBLND_RECV_WRS(v)   IBLND_RX_MSGS(v)
+#define IBLND_SEND_WRS(v)   ((IBLND_RDMA_FRAGS(v) + 1) *            \
+			     IBLND_CONCURRENT_SENDS(v))
+#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
 
 struct kib_hca_dev;
 
@@ -520,7 +531,9 @@ typedef struct kib_rx                         /* receive message */
 	enum ib_wc_status      rx_status;     /* completion status */
 	kib_msg_t              *rx_msg;       /* message buffer (host vaddr) */
 	__u64                  rx_msgaddr;    /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+
+	DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);  /* for dma_unmap_single() */
+
 	struct ib_recv_wr      rx_wrq;        /* receive work item... */
 	struct ib_sge          rx_sge;        /* ...and its memory */
 } kib_rx_t;
@@ -547,7 +560,9 @@ typedef struct kib_tx                         /* transmit message */
 					       * completion */
 	kib_msg_t              *tx_msg;       /* message buffer (host vaddr) */
 	__u64                  tx_msgaddr;    /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+
+	DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);  /* for dma_unmap_single() */
+
 	int                    tx_nwrq;       /* # send work items */
 	struct ib_send_wr      *tx_wrq;       /* send work items... */
 	struct ib_sge          *tx_sge;       /* ...and their memory */
@@ -648,14 +663,14 @@ extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
 static inline void
 kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
 {
-	LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
 	atomic_inc(&hdev->ibh_ref);
 }
 
 static inline void
 kiblnd_hdev_decref(kib_hca_dev_t *hdev)
 {
-	LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
 	if (atomic_dec_and_test(&hdev->ibh_ref))
 		kiblnd_hdev_destroy(hdev);
 }
@@ -698,44 +713,44 @@ do {									\
 	}								\
 } while (0)
 
-#define kiblnd_peer_addref(peer)				\
-do {							    \
-	CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",		\
-	       (peer), libcfs_nid2str((peer)->ibp_nid),	 \
-	       atomic_read (&(peer)->ibp_refcount));	\
-	atomic_inc(&(peer)->ibp_refcount);		  \
+#define kiblnd_peer_addref(peer)                                        \
+do {                                                                    \
+	CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                        \
+	       (peer), libcfs_nid2str((peer)->ibp_nid),                 \
+	       atomic_read(&(peer)->ibp_refcount));                     \
+	atomic_inc(&(peer)->ibp_refcount);                              \
 } while (0)
 
-#define kiblnd_peer_decref(peer)				\
-do {							    \
-	CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",		\
-	       (peer), libcfs_nid2str((peer)->ibp_nid),	 \
-	       atomic_read (&(peer)->ibp_refcount));	\
-	LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);	      \
-	if (atomic_dec_and_test(&(peer)->ibp_refcount))     \
-		kiblnd_destroy_peer(peer);		      \
+#define kiblnd_peer_decref(peer)                                        \
+do {                                                                    \
+	CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                        \
+	       (peer), libcfs_nid2str((peer)->ibp_nid),                 \
+	       atomic_read(&(peer)->ibp_refcount));                     \
+	LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);                      \
+	if (atomic_dec_and_test(&(peer)->ibp_refcount))                 \
+		kiblnd_destroy_peer(peer);                              \
 } while (0)
 
 static inline struct list_head *
-kiblnd_nid2peerlist (lnet_nid_t nid)
+kiblnd_nid2peerlist(lnet_nid_t nid)
 {
 	unsigned int hash =
 		((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
 
-	return (&kiblnd_data.kib_peers [hash]);
+	return &kiblnd_data.kib_peers[hash];
 }
 
 static inline int
-kiblnd_peer_active (kib_peer_t *peer)
+kiblnd_peer_active(kib_peer_t *peer)
 {
 	/* Am I in the peer hash table? */
 	return (!list_empty(&peer->ibp_list));
 }
 
 static inline kib_conn_t *
-kiblnd_get_conn_locked (kib_peer_t *peer)
+kiblnd_get_conn_locked(kib_peer_t *peer)
 {
-	LASSERT (!list_empty(&peer->ibp_conns));
+	LASSERT(!list_empty(&peer->ibp_conns));
 
 	/* just return the first connection */
 	return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
@@ -752,7 +767,7 @@ kiblnd_send_keepalive(kib_conn_t *conn)
 static inline int
 kiblnd_need_noop(kib_conn_t *conn)
 {
-	LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
 	if (conn->ibc_outstanding_credits <
 	    IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
@@ -789,7 +804,7 @@ kiblnd_abort_receives(kib_conn_t *conn)
 }
 
 static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
 {
 	if (q == &conn->ibc_tx_queue)
 		return "tx_queue";
@@ -816,43 +831,43 @@ kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
 #define IBLND_WID_MASK  3UL
 
 static inline __u64
-kiblnd_ptr2wreqid (void *ptr, int type)
+kiblnd_ptr2wreqid(void *ptr, int type)
 {
 	unsigned long lptr = (unsigned long)ptr;
 
-	LASSERT ((lptr & IBLND_WID_MASK) == 0);
-	LASSERT ((type & ~IBLND_WID_MASK) == 0);
+	LASSERT((lptr & IBLND_WID_MASK) == 0);
+	LASSERT((type & ~IBLND_WID_MASK) == 0);
 	return (__u64)(lptr | type);
 }
 
 static inline void *
-kiblnd_wreqid2ptr (__u64 wreqid)
+kiblnd_wreqid2ptr(__u64 wreqid)
 {
 	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
 }
 
 static inline int
-kiblnd_wreqid2type (__u64 wreqid)
+kiblnd_wreqid2type(__u64 wreqid)
 {
 	return (wreqid & IBLND_WID_MASK);
 }
 
 static inline void
-kiblnd_set_conn_state (kib_conn_t *conn, int state)
+kiblnd_set_conn_state(kib_conn_t *conn, int state)
 {
 	conn->ibc_state = state;
 	mb();
 }
 
 static inline void
-kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
+kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
 {
 	msg->ibm_type = type;
 	msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
 }
 
 static inline int
-kiblnd_rd_size (kib_rdma_desc_t *rd)
+kiblnd_rd_size(kib_rdma_desc_t *rd)
 {
 	int   i;
 	int   size;
@@ -888,7 +903,7 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
 		rd->rd_frags[index].rf_addr += nob;
 		rd->rd_frags[index].rf_nob  -= nob;
 	} else {
-		index ++;
+		index++;
 	}
 
 	return index;
@@ -897,8 +912,8 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
 static inline int
 kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
 {
-	LASSERT (msgtype == IBLND_MSG_GET_REQ ||
-		 msgtype == IBLND_MSG_PUT_ACK);
+	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
+		msgtype == IBLND_MSG_PUT_ACK);

 	return msgtype == IBLND_MSG_GET_REQ ?
 	       offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
@@ -980,67 +995,68 @@ int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
 
 int  kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
-			 kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
+			 kib_rdma_desc_t *rd, __u64 *iova,
+			 kib_phys_mr_t **pp_pmr);
 void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
 
-int  kiblnd_startup (lnet_ni_t *ni);
-void kiblnd_shutdown (lnet_ni_t *ni);
-int  kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+int  kiblnd_startup(lnet_ni_t *ni);
+void kiblnd_shutdown(lnet_ni_t *ni);
+int  kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
+void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
 
 int  kiblnd_tunables_init(void);
 void kiblnd_tunables_fini(void);
 
-int  kiblnd_connd (void *arg);
+int  kiblnd_connd(void *arg);
 int  kiblnd_scheduler(void *arg);
 int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
-int  kiblnd_failover_thread (void *arg);
+int  kiblnd_failover_thread(void *arg);
 
 int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages (kib_pages_t *p);
+void kiblnd_free_pages(kib_pages_t *p);
 
 int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
 			struct rdma_cm_event *event);
 int  kiblnd_translate_mtu(int value);
 
 int  kiblnd_dev_failover(kib_dev_t *dev);
-int  kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer (kib_peer_t *peer);
-void kiblnd_destroy_dev (kib_dev_t *dev);
-void kiblnd_unlink_peer_locked (kib_peer_t *peer);
-void kiblnd_peer_alive (kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
-void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
-int  kiblnd_close_stale_conns_locked (kib_peer_t *peer,
-				      int version, __u64 incarnation);
-int  kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
+int  kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer(kib_peer_t *peer);
+void kiblnd_destroy_dev(kib_dev_t *dev);
+void kiblnd_unlink_peer_locked(kib_peer_t *peer);
+void kiblnd_peer_alive(kib_peer_t *peer);
+kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
+void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
+int  kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+				     int version, __u64 incarnation);
+int  kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
 
 void kiblnd_connreq_done(kib_conn_t *conn, int status);
-kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
-				int state, int version);
-void kiblnd_destroy_conn (kib_conn_t *conn);
-void kiblnd_close_conn (kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
-
-int  kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
-		       int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-
-void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
+kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+			       int state, int version);
+void kiblnd_destroy_conn(kib_conn_t *conn);
+void kiblnd_close_conn(kib_conn_t *conn, int error);
+void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
+
+int  kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
+		      int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+
+void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
+void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
 			 int status);
-void kiblnd_check_sends (kib_conn_t *conn);
+void kiblnd_check_sends(kib_conn_t *conn);
 
 void kiblnd_qp_event(struct ib_event *event, void *arg);
 void kiblnd_cq_event(struct ib_event *event, void *arg);
 void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
 
-void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
 		      int credits, lnet_nid_t dstnid, __u64 dststamp);
 int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int  kiblnd_post_rx (kib_rx_t *rx, int credit);
+int  kiblnd_post_rx(kib_rx_t *rx, int credit);
 
 int  kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
 int  kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 477aa8b..250c5fa 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -50,9 +50,9 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
-	LASSERT(!tx->tx_queued);	       /* mustn't be queued for sending */
-	LASSERT(tx->tx_sending == 0);	  /* mustn't be awaiting sent callback */
-	LASSERT(!tx->tx_waiting);	      /* mustn't be awaiting peer response */
+	LASSERT(!tx->tx_queued);      /* mustn't be queued for sending */
+	LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */
+	LASSERT(!tx->tx_waiting);     /* mustn't be awaiting peer response */
 	LASSERT(tx->tx_pool != NULL);
 
 	kiblnd_unmap_tx(ni, tx);
@@ -410,7 +410,8 @@ kiblnd_handle_rx(kib_rx_t *rx)
 		tx->tx_nwrq = 0;		/* overwrite PUT_REQ */
 
 		rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
-				       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
+				       kiblnd_rd_size(
+					       &msg->ibm_u.putack.ibpam_rd),
 				       &msg->ibm_u.putack.ibpam_rd,
 				       msg->ibm_u.putack.ibpam_dst_cookie);
 		if (rc2 < 0)
@@ -1003,7 +1004,8 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
 
 	if (failed) {
 		if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
-			CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
+			CNETERR(
+				"Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
 				libcfs_nid2str(conn->ibc_peer->ibp_nid),
 				tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
 				status);
@@ -1114,7 +1116,8 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		}
 
 		if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
-			CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
+			CERROR(
+			       "RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
 			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
 			       IBLND_RDMA_FRAGS(conn->ibc_version),
 			       srcidx, srcrd->rd_nfrags,
@@ -1154,7 +1157,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		sge++;
 	}
 
-	if (rc < 0)			     /* no RDMA if completing with failure */
+	if (rc < 0) /* no RDMA if completing with failure */
 		tx->tx_nwrq = 0;
 
 	ibmsg->ibm_u.completion.ibcm_status = rc;
@@ -1170,8 +1173,8 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
 {
 	struct list_head *q;
 
-	LASSERT(tx->tx_nwrq > 0);	      /* work items set up */
-	LASSERT(!tx->tx_queued);	       /* not queued for sending already */
+	LASSERT(tx->tx_nwrq > 0); /* work items set up */
+	LASSERT(!tx->tx_queued);  /* not queued for sending already */
 	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
 	tx->tx_queued = 1;
@@ -1343,8 +1346,10 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 	/* If I get here, I've committed to send, so I complete the tx with
 	 * failure on any problems */
 
-	LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
-	LASSERT(tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */
+	LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned
+						     * a conn */
+	LASSERT(tx == NULL || tx->tx_nwrq > 0);     /* work items have been set
+						     * up */
 
 	/* First time, just use a read lock since I expect to find my peer
 	 * connected */
@@ -1496,7 +1501,9 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 			break;		  /* send IMMEDIATE */
 
 		/* is the REPLY message too small for RDMA? */
-		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+		nob = offsetof(kib_msg_t,
+			       ibm_u.immediate.ibim_payload[
+				       lntmsg->msg_md->md_length]);
 		if (nob <= IBLND_MSG_SIZE)
 			break;		  /* send IMMEDIATE */
 
@@ -1542,15 +1549,17 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 			return -EIO;
 		}
 
-		tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
-		tx->tx_waiting = 1;	     /* waiting for GET_DONE */
+		tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on
+					    * completion */
+		tx->tx_waiting = 1; /* waiting for GET_DONE */
 		kiblnd_launch_tx(ni, tx, target.nid);
 		return 0;
 
 	case LNET_MSG_REPLY:
 	case LNET_MSG_PUT:
 		/* Is the payload small enough not to need RDMA? */
-		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+		nob = offsetof(kib_msg_t,
+			       ibm_u.immediate.ibim_payload[payload_nob]);
 		if (nob <= IBLND_MSG_SIZE)
 			break;		  /* send IMMEDIATE */
 
@@ -1580,10 +1589,11 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 		ibmsg = tx->tx_msg;
 		ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
 		ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
-		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
+				   sizeof(kib_putreq_msg_t));
 
-		tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
-		tx->tx_waiting = 1;	     /* waiting for PUT_{ACK,NAK} */
+		tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
+		tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
 		kiblnd_launch_tx(ni, tx, target.nid);
 		return 0;
 	}
@@ -1605,19 +1615,21 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 
 	if (payload_kiov != NULL)
 		lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
-				    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+				    offsetof(kib_msg_t,
+					     ibm_u.immediate.ibim_payload),
 				    payload_niov, payload_kiov,
 				    payload_offset, payload_nob);
 	else
 		lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
-				   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+				   offsetof(kib_msg_t,
+					    ibm_u.immediate.ibim_payload),
 				   payload_niov, payload_iov,
 				   payload_offset, payload_nob);
 
 	nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
 	kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
 
-	tx->tx_lntmsg[0] = lntmsg;	      /* finalise lntmsg on completion */
+	tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
 	kiblnd_launch_tx(ni, tx, target.nid);
 	return 0;
 }
@@ -1711,7 +1723,8 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
 		if (nob > rx->rx_nob) {
 			CERROR("Immediate message from %s too big: %d(%d)\n",
-				libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
+				libcfs_nid2str(
+				      rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
 				nob, rx->rx_nob);
 			rc = -EPROTO;
 			break;
@@ -1719,22 +1732,25 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 
 		if (kiov != NULL)
 			lnet_copy_flat2kiov(niov, kiov, offset,
-					    IBLND_MSG_SIZE, rxmsg,
-					    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
-					    mlen);
+				    IBLND_MSG_SIZE, rxmsg,
+				    offsetof(kib_msg_t,
+					     ibm_u.immediate.ibim_payload),
+				    mlen);
 		else
 			lnet_copy_flat2iov(niov, iov, offset,
-					   IBLND_MSG_SIZE, rxmsg,
-					   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
-					   mlen);
+				   IBLND_MSG_SIZE, rxmsg,
+				   offsetof(kib_msg_t,
+					    ibm_u.immediate.ibim_payload),
+				   mlen);
 		lnet_finalize(ni, lntmsg, 0);
 		break;
 
 	case IBLND_MSG_PUT_REQ:
 		if (mlen == 0) {
 			lnet_finalize(ni, lntmsg, 0);
-			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
-					       rxmsg->ibm_u.putreq.ibprm_cookie);
+			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+					0,
+					rxmsg->ibm_u.putreq.ibprm_cookie);
 			break;
 		}
 
@@ -1761,19 +1777,22 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 			       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
 			kiblnd_tx_done(ni, tx);
 			/* tell peer it's over */
-			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
-					       rxmsg->ibm_u.putreq.ibprm_cookie);
+			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+					rc,
+					rxmsg->ibm_u.putreq.ibprm_cookie);
 			break;
 		}
 
-		nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
-		txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+		nob = offsetof(kib_putack_msg_t,
+			       ibpam_rd.rd_frags[tx->tx_nfrags]);
+		txmsg->ibm_u.putack.ibpam_src_cookie =
+				rxmsg->ibm_u.putreq.ibprm_cookie;
 		txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
 
 		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
 
-		tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
-		tx->tx_waiting = 1;	     /* waiting for PUT_DONE */
+		tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
+		tx->tx_waiting = 1; /* waiting for PUT_DONE */
 		kiblnd_queue_tx(tx, conn);
 
 		/* reposted buffer reserved for PUT_DONE */
@@ -1883,8 +1902,10 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
 		       libcfs_nid2str(peer->ibp_nid), error,
 		       list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
 		       list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
-		       list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
-		       list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+		       list_empty(&conn->ibc_tx_queue_rsrvd) ? "" :
+			       "(sending_rsrvd)",
+		       list_empty(&conn->ibc_tx_queue_nocred) ? "" :
+			       "(sending_nocred)",
 		       list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
 	}
 
@@ -2191,6 +2212,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	unsigned long flags;
 	int rc;
 	struct sockaddr_in *peer_addr;
+
 	LASSERT(!in_interrupt());
 
 	/* cmid inherits 'context' from the corresponding listener id */
@@ -2206,6 +2228,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	if (*kiblnd_tunables.kib_require_priv_port &&
 	    ntohs(peer_addr->sin_port) >= PROT_SOCK) {
 		__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+
 		CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
 		       &ip, ntohs(peer_addr->sin_port));
 		goto failed;
@@ -2280,8 +2303,10 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
 	if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
 	    IBLND_MSG_QUEUE_SIZE(version)) {
-		CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
-		       libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
+		CERROR(
+		       "Can't accept %s: incompatible queue depth %d (%d wanted)\n",
+		       libcfs_nid2str(nid),
+		       reqmsg->ibm_u.connparams.ibcp_queue_depth,
 		       IBLND_MSG_QUEUE_SIZE(version));
 
 		if (version == IBLND_MSG_VERSION)
@@ -2292,7 +2317,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
 	if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
 	    IBLND_RDMA_FRAGS(version)) {
-		CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
+		CERROR(
+		       "Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
 		       libcfs_nid2str(nid), version,
 		       reqmsg->ibm_u.connparams.ibcp_max_frags,
 		       IBLND_RDMA_FRAGS(version));
@@ -2394,17 +2420,19 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
 	conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
-	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
-		 <= IBLND_RX_MSGS(version));
+	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+		IBLND_OOB_MSGS(version)
+		<= IBLND_RX_MSGS(version));
 
 	ackmsg = &conn->ibc_connvars->cv_msg;
 	memset(ackmsg, 0, sizeof(*ackmsg));
 
 	kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
 			sizeof(ackmsg->ibm_u.connparams));
-	ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
+	ackmsg->ibm_u.connparams.ibcp_queue_depth  =
+						IBLND_MSG_QUEUE_SIZE(version);
 	ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-	ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
+	ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
 
 	kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
 
@@ -2498,10 +2526,12 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
 		break;
 	}
 
-	CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
+	CNETERR(
+		"%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
 		libcfs_nid2str(peer->ibp_nid),
 		reason, IBLND_MSG_VERSION, version,
-		cp != NULL ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
+		cp != NULL ? cp->ibcp_queue_depth  :
+			     IBLND_MSG_QUEUE_SIZE(version),
 		cp != NULL ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
 		cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
 
@@ -2573,14 +2603,16 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 
 			if (rej->ibr_magic != IBLND_MSG_MAGIC &&
 			    rej->ibr_magic != LNET_PROTO_MAGIC) {
-				CERROR("%s rejected: consumer defined fatal error\n",
+				CERROR(
+				       "%s rejected: consumer defined fatal error\n",
 				       libcfs_nid2str(peer->ibp_nid));
 				break;
 			}
 
 			if (rej->ibr_version != IBLND_MSG_VERSION &&
 			    rej->ibr_version != IBLND_MSG_VERSION_1) {
-				CERROR("%s rejected: o2iblnd version %x error\n",
+				CERROR(
+				       "%s rejected: o2iblnd version %x error\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       rej->ibr_version);
 				break;
@@ -2588,11 +2620,14 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 
 			if (rej->ibr_why     == IBLND_REJECT_FATAL &&
 			    rej->ibr_version == IBLND_MSG_VERSION_1) {
-				CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
-				       libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
+				CDEBUG(D_NET,
+				       "rejected by old version peer %s: %x\n",
+				       libcfs_nid2str(peer->ibp_nid),
+				       rej->ibr_version);
 
 				if (conn->ibc_version != IBLND_MSG_VERSION_1)
-					rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
+					rej->ibr_why =
+						IBLND_REJECT_CONN_UNCOMPAT;
 			}
 
 			switch (rej->ibr_why) {
@@ -2604,7 +2639,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 				break;
 
 			case IBLND_REJECT_MSG_QUEUE_SIZE:
-				CERROR("%s rejected: incompatible message queue depth %d, %d\n",
+				CERROR(
+				       "%s rejected: incompatible message queue depth %d, %d\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       cp != NULL ? cp->ibcp_queue_depth :
 				       IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
@@ -2612,7 +2648,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 				break;
 
 			case IBLND_REJECT_RDMA_FRAGS:
-				CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
+				CERROR(
+				       "%s rejected: incompatible # of RDMA fragments %d, %d\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       cp != NULL ? cp->ibcp_max_frags :
 				       IBLND_RDMA_FRAGS(rej->ibr_version),
@@ -2674,7 +2711,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 	}
 
 	if (ver != msg->ibm_version) {
-		CERROR("%s replied version %x is different with requested version %x\n",
+		CERROR(
+		       "%s replied version %x is different with requested version %x\n",
 		       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
 		rc = -EPROTO;
 		goto failed;
@@ -2718,7 +2756,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
 	if (rc != 0) {
-		CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
+		CERROR(
+		       "Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), rc,
 		       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
 		goto failed;
@@ -2727,7 +2766,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 	conn->ibc_incarnation = msg->ibm_srcstamp;
 	conn->ibc_credits =
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
-	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
+	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+		IBLND_OOB_MSGS(ver)
 		 <= IBLND_RX_MSGS(ver));
 
 	kiblnd_connreq_done(conn, 0);
@@ -2764,7 +2804,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
+	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
+				  version);
 	if (conn == NULL) {
 		kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
 		kiblnd_peer_decref(peer); /* lose cmid's ref */
@@ -3054,7 +3095,8 @@ kiblnd_check_conns(int idx)
 			}
 
 			if (timedout) {
-				CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
+				CERROR(
+				       "Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       cfs_duration_sec(cfs_time_current() -
 							peer->ibp_last_alive),
@@ -3133,8 +3175,7 @@ kiblnd_connd(void *arg)
 		dropped_lock = 0;
 
 		if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
-			conn = list_entry(kiblnd_data. \
-					      kib_connd_zombies.next,
+			conn = list_entry(kiblnd_data.kib_connd_zombies.next,
 					      kib_conn_t, ibc_list);
 			list_del(&conn->ibc_list);
 
@@ -3169,7 +3210,8 @@ kiblnd_connd(void *arg)
 			const int p = 1;
 			int chunk = kiblnd_data.kib_peer_hash_size;
 
-			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+					       flags);
 			dropped_lock = 1;
 
 			/* Time to check for RDMA timeouts on a few more
@@ -3325,7 +3367,8 @@ kiblnd_scheduler(void *arg)
 
 	rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
 	if (rc != 0) {
-		CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
+		CWARN(
+		      "Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
 		      sched->ibs_cpt);
 	}
 
@@ -3358,8 +3401,11 @@ kiblnd_scheduler(void *arg)
 				rc = ib_req_notify_cq(conn->ibc_cq,
 						      IB_CQ_NEXT_COMP);
 				if (rc < 0) {
-					CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
-					      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+					CWARN(
+					      "%s: ib_req_notify_cq failed: %d, closing connection\n",
+					      libcfs_nid2str(
+						      conn->ibc_peer->ibp_nid),
+					      rc);
 					kiblnd_close_conn(conn, -EIO);
 					kiblnd_conn_decref(conn);
 					spin_lock_irqsave(&sched->ibs_lock,
@@ -3371,7 +3417,8 @@ kiblnd_scheduler(void *arg)
 			}
 
 			if (rc < 0) {
-				CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
+				CWARN(
+				      "%s: ib_poll_cq failed: %d, closing connection\n",
 				      libcfs_nid2str(conn->ibc_peer->ibp_nid),
 				      rc);
 				kiblnd_close_conn(conn, -EIO);
@@ -3403,7 +3450,7 @@ kiblnd_scheduler(void *arg)
 				spin_lock_irqsave(&sched->ibs_lock, flags);
 			}
 
-			kiblnd_conn_decref(conn); /* ...drop my ref from above */
+			kiblnd_conn_decref(conn);/* ...drop my ref from above */
 			did_something = 1;
 		}
 
@@ -3474,7 +3521,8 @@ kiblnd_failover_thread(void *arg)
 
 			/* failed to failover, retry later */
 			dev->ibd_next_failover =
-				cfs_time_shift(min(dev->ibd_failed_failover, 10));
+				cfs_time_shift(min(dev->ibd_failed_failover,
+						   10));
 			if (kiblnd_dev_can_failover(dev)) {
 				list_add_tail(&dev->ibd_fail_list,
 					      &kiblnd_data.kib_failed_devs);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b0e0036..90f32ca 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -192,38 +192,55 @@ kiblnd_tunables_init(void)
 		*kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
 
 	if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
-		*kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
+		*kiblnd_tunables.kib_peertxcredits =
+				*kiblnd_tunables.kib_credits;
 
-	if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
-		*kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
+	if (*kiblnd_tunables.kib_peercredits_hiw <
+	    *kiblnd_tunables.kib_peertxcredits / 2)
+		*kiblnd_tunables.kib_peercredits_hiw =
+				*kiblnd_tunables.kib_peertxcredits / 2;
 
-	if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
-		*kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
+	if (*kiblnd_tunables.kib_peercredits_hiw >=
+	    *kiblnd_tunables.kib_peertxcredits)
+		*kiblnd_tunables.kib_peercredits_hiw =
+				*kiblnd_tunables.kib_peertxcredits - 1;
 
 	if (*kiblnd_tunables.kib_map_on_demand < 0 ||
 	    *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
-		*kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+		/* disable map-on-demand */
+		*kiblnd_tunables.kib_map_on_demand = 0;
 
 	if (*kiblnd_tunables.kib_map_on_demand == 1)
-		*kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
+		/* don't make sense to create map if only one fragment */
+		*kiblnd_tunables.kib_map_on_demand = 2;
 
 	if (*kiblnd_tunables.kib_concurrent_sends == 0) {
 		if (*kiblnd_tunables.kib_map_on_demand > 0 &&
-		    *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
-			*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
+		    *kiblnd_tunables.kib_map_on_demand <=
+		    IBLND_MAX_RDMA_FRAGS / 8)
+			*kiblnd_tunables.kib_concurrent_sends =
+				(*kiblnd_tunables.kib_peertxcredits) * 2;
 		else
-			*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
+			*kiblnd_tunables.kib_concurrent_sends =
+				(*kiblnd_tunables.kib_peertxcredits);
 	}
 
-	if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
-		*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
-
-	if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
-		*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
-
-	if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
-		CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
-		      *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
+	if (*kiblnd_tunables.kib_concurrent_sends >
+	    *kiblnd_tunables.kib_peertxcredits * 2)
+		*kiblnd_tunables.kib_concurrent_sends =
+				*kiblnd_tunables.kib_peertxcredits * 2;
+
+	if (*kiblnd_tunables.kib_concurrent_sends <
+	    *kiblnd_tunables.kib_peertxcredits / 2)
+		*kiblnd_tunables.kib_concurrent_sends =
+				*kiblnd_tunables.kib_peertxcredits / 2;
+
+	if (*kiblnd_tunables.kib_concurrent_sends <
+	    *kiblnd_tunables.kib_peertxcredits) {
+		CWARN(
+		      "Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
+		      *kiblnd_tunables.kib_concurrent_sends,
+		      *kiblnd_tunables.kib_peertxcredits);
 	}
 
 	return 0;
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (8 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 11/13] staging: lnet: o2iblnd: checkpatch.pl fixes Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH v4 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes Mike Shuey
                   ` (12 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

More checkpatch.pl warnings & code cleanup.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/lnet/klnds/socklnd/socklnd.c    |   94 +++--
 .../staging/lustre/lnet/klnds/socklnd/socklnd.h    |   20 +-
 .../staging/lustre/lnet/klnds/socklnd/socklnd_cb.c |  419 +++++++++++---------
 .../lustre/lnet/klnds/socklnd/socklnd_lib-linux.c  |   24 +-
 .../lustre/lnet/klnds/socklnd/socklnd_lib-linux.h  |    2 +-
 .../lustre/lnet/klnds/socklnd/socklnd_proto.c      |   48 ++-
 6 files changed, 351 insertions(+), 256 deletions(-)

diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 7b5d407..286ef3f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -337,7 +337,8 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
 			       &route->ksnr_ipaddr,
 			       &conn->ksnc_myipaddr);
 		} else {
-			CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
+			CDEBUG(D_NET,
+			       "Rebinding %s %pI4h from %pI4h to %pI4h\n",
 			       libcfs_id2str(peer->ksnp_id),
 			       &route->ksnr_ipaddr,
 			       &route->ksnr_myipaddr,
@@ -576,7 +577,8 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
 	if (id.nid != LNET_NID_ANY)
-		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
+				ksocknal_data.ksnd_peers);
 	else {
 		lo = 0;
 		hi = ksocknal_data.ksnd_peer_hash_size - 1;
@@ -590,8 +592,10 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 			if (peer->ksnp_ni != ni)
 				continue;
 
-			if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
-			      (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
+			if (!((id.nid == LNET_NID_ANY ||
+			       peer->ksnp_id.nid == id.nid) &&
+			      (id.pid == LNET_PID_ANY ||
+			       peer->ksnp_id.pid == id.pid)))
 				continue;
 
 			ksocknal_peer_addref(peer);     /* a ref for me... */
@@ -805,12 +809,15 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 					if (peer->ksnp_passive_ips[k] == ip)
 						break;
 
-				if (k < peer->ksnp_n_passive_ips) /* using it already */
+				/* using it already */
+				if (k < peer->ksnp_n_passive_ips)
 					continue;
 
-				k = ksocknal_match_peerip(iface, peerips, n_peerips);
+				k = ksocknal_match_peerip(iface, peerips,
+							  n_peerips);
 				xor = ip ^ peerips[k];
-				this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
+				this_netmatch = ((xor & iface->ksni_netmask) ==
+						 0) ? 1 : 0;
 
 				if (!(best_iface == NULL ||
 				      best_netmatch < this_netmatch ||
@@ -973,7 +980,8 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
 
 	LIBCFS_ALLOC(cr, sizeof(*cr));
 	if (cr == NULL) {
-		LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
+		LCONSOLE_ERROR_MSG(0x12f,
+				   "Dropping connection request from %pI4h: memory exhausted\n",
 				   &peer_ip);
 		return -ENOMEM;
 	}
@@ -1257,7 +1265,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
 	conn->ksnc_tx_last_post = cfs_time_current();
 	/* Set the deadline for the outgoing HELLO to drain */
 	conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
-	conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	conn->ksnc_tx_deadline =
+		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 	mb();   /* order with adding to peer's conn list */
 
 	list_add(&conn->ksnc_list, &peer->ksnp_conns);
@@ -1269,7 +1278,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
 
 	/* Take packets blocking for this connection. */
 	list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
-		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
+		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
+		    SOCKNAL_MATCH_NO)
 				continue;
 
 		list_del(&tx->tx_list);
@@ -1286,7 +1296,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
 	 *	socket callbacks.
 	 */
 
-	CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
+	CDEBUG(D_NET,
+	       "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
 	       libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
 	       &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
 	       conn->ksnc_port, incarnation, cpt,
@@ -1457,8 +1468,8 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
 			spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
 		}
 
-		peer->ksnp_proto = NULL;	/* renegotiate protocol version */
-		peer->ksnp_error = error;       /* stash last conn close reason */
+		peer->ksnp_proto = NULL;  /* renegotiate protocol version */
+		peer->ksnp_error = error; /* stash last conn close reason */
 
 		if (list_empty(&peer->ksnp_routes)) {
 			/* I've just closed last conn belonging to a
@@ -1635,7 +1646,8 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
 	case SOCKNAL_RX_LNET_PAYLOAD:
 		last_rcv = conn->ksnc_rx_deadline -
 			   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
-		CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
+		CERROR(
+		       "Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
 		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
 		       &conn->ksnc_ipaddr, conn->ksnc_port,
 		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
@@ -1646,21 +1658,24 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
 		break;
 	case SOCKNAL_RX_LNET_HEADER:
 		if (conn->ksnc_rx_started)
-			CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
+			CERROR(
+			       "Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 			       &conn->ksnc_ipaddr, conn->ksnc_port,
 			       conn->ksnc_proto->pro_version);
 		break;
 	case SOCKNAL_RX_KSM_HEADER:
 		if (conn->ksnc_rx_started)
-			CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
+			CERROR(
+			       "Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 			       &conn->ksnc_ipaddr, conn->ksnc_port,
 			       conn->ksnc_proto->pro_version);
 		break;
 	case SOCKNAL_RX_SLOP:
 		if (conn->ksnc_rx_started)
-			CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
+			CERROR(
+			       "Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 			       &conn->ksnc_ipaddr, conn->ksnc_port);
 	       break;
@@ -1725,7 +1740,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
 	if (id.nid != LNET_NID_ANY)
-		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
+				ksocknal_data.ksnd_peers);
 	else {
 		lo = 0;
 		hi = ksocknal_data.ksnd_peer_hash_size - 1;
@@ -1737,11 +1753,14 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
 
 			peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 
-			if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
-			      (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
+			if (!((id.nid == LNET_NID_ANY ||
+			       id.nid == peer->ksnp_id.nid) &&
+			      (id.pid == LNET_PID_ANY ||
+			       id.pid == peer->ksnp_id.pid)))
 				continue;
 
-			count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
+			count += ksocknal_close_peer_conns_locked(peer,
+								  ipaddr, 0);
 		}
 	}
 
@@ -1805,7 +1824,8 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 			if (bufnob < conn->ksnc_tx_bufnob) {
 				/* something got ACKed */
 				conn->ksnc_tx_deadline =
-					cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+					cfs_time_shift(
+					    *ksocknal_tunables.ksnd_timeout);
 				peer->ksnp_last_alive = now;
 				conn->ksnc_tx_bufnob = bufnob;
 			}
@@ -1838,7 +1858,6 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 		ksocknal_launch_all_connections_locked(peer);
 
 	write_unlock_bh(glock);
-	return;
 }
 
 static void
@@ -1962,7 +1981,8 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 						      ksnp_list);
 
 				for (j = 0; j < peer->ksnp_n_passive_ips; j++)
-					if (peer->ksnp_passive_ips[j] == ipaddress)
+					if (peer->ksnp_passive_ips[j] ==
+					    ipaddress)
 						iface->ksni_npeers++;
 
 				list_for_each(rtmp, &peer->ksnp_routes) {
@@ -1977,7 +1997,8 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 		}
 
 		rc = 0;
-		/* NB only new connections will pay attention to the new interface! */
+		/* NB only new connections will pay attention to the new
+		 * interface! */
 	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2064,7 +2085,8 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
 				if (peer->ksnp_ni != ni)
 					continue;
 
-				ksocknal_peer_del_interface_locked(peer, this_ip);
+				ksocknal_peer_del_interface_locked(peer,
+								   this_ip);
 			}
 		}
 	}
@@ -2111,7 +2133,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 
 	case IOC_LIBCFS_DEL_INTERFACE:
 		return ksocknal_del_interface(ni,
-					      data->ioc_u32[0]); /* IP address */
+					      data->ioc_u32[0]);/* IP address */
 
 	case IOC_LIBCFS_GET_PEER: {
 		__u32 myip = 0;
@@ -2153,7 +2175,8 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 		int txmem;
 		int rxmem;
 		int nagle;
-		ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+		ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni,
+							      data->ioc_count);
 
 		if (conn == NULL)
 			return -ENOENT;
@@ -2263,9 +2286,8 @@ ksocknal_base_shutdown(void)
 	case SOCKNAL_INIT_ALL:
 	case SOCKNAL_INIT_DATA:
 		LASSERT(ksocknal_data.ksnd_peers != NULL);
-		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
 			LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
-		}
 
 		LASSERT(list_empty(&ksocknal_data.ksnd_nets));
 		LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
@@ -2315,7 +2337,8 @@ ksocknal_base_shutdown(void)
 		read_lock(&ksocknal_data.ksnd_global_lock);
 		while (ksocknal_data.ksnd_nthreads != 0) {
 			i++;
-			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+			/* power of 2? */
+			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 			       "waiting for %d threads to terminate\n",
 				ksocknal_data.ksnd_nthreads);
 			read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2446,6 +2469,7 @@ ksocknal_base_startup(void)
 
 	for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
 		char name[16];
+
 		spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 		ksocknal_data.ksnd_connd_starting++;
 		spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
@@ -2503,7 +2527,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 		ksock_route_t *route;
 		ksock_conn_t  *conn;
 
-		CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
+		CWARN(
+		      "Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
 		      libcfs_id2str(peer->ksnp_id),
 		      atomic_read(&peer->ksnp_refcount),
 		      peer->ksnp_sharecount, peer->ksnp_closing,
@@ -2514,7 +2539,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 
 		list_for_each(tmp, &peer->ksnp_routes) {
 			route = list_entry(tmp, ksock_route_t, ksnr_list);
-			CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
+			CWARN(
+			      "Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
 			      atomic_read(&route->ksnr_refcount),
 			      route->ksnr_scheduled, route->ksnr_connecting,
 			      route->ksnr_connected, route->ksnr_deleted);
@@ -2530,7 +2556,6 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 	}
 
 	read_unlock(&ksocknal_data.ksnd_global_lock);
-	return;
 }
 
 void
@@ -2711,6 +2736,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
 		long id;
 		char name[20];
 		ksock_sched_t *sched;
+
 		id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
 		sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
 		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index c34378c..d270263 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -139,7 +139,7 @@ typedef struct {
 typedef struct {
 	__u64		  ksnn_incarnation;	/* my epoch */
 	spinlock_t	  ksnn_lock;		/* serialise */
-	struct list_head	  ksnn_list;		/* chain on global list */
+	struct list_head  ksnn_list;		/* chain on global list */
 	int		  ksnn_npeers;		/* # peers */
 	int		  ksnn_shutdown;	/* shutting down? */
 	int		  ksnn_ninterfaces;	/* IP interfaces */
@@ -474,7 +474,8 @@ ksocknal_route_mask(void)
 static inline struct list_head *
 ksocknal_nid2peerlist(lnet_nid_t nid)
 {
-	unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
+	unsigned int hash = ((unsigned int)nid) %
+			    ksocknal_data.ksnd_peer_hash_size;
 
 	return &ksocknal_data.ksnd_peers[hash];
 }
@@ -587,8 +588,10 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 		  unsigned int offset, unsigned int mlen, unsigned int rlen);
 int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
 
-extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
+extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip,
+			     int port);
+extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni,
+					       lnet_process_id_t id);
 extern ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
 extern void ksocknal_peer_failed(ksock_peer_t *peer);
 extern int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
@@ -613,11 +616,13 @@ extern void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
 extern void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
 				  int error);
 extern void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid,
+			   unsigned long *when);
 extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
 extern void ksocknal_thread_fini(void);
 extern void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
+extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t
+							     *peer);
 extern ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
 extern int ksocknal_new_packet(ksock_conn_t *conn, int skip);
 extern int ksocknal_scheduler(void *arg);
@@ -634,7 +639,8 @@ extern void ksocknal_write_callback(ksock_conn_t *conn);
 extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
 extern void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
 extern void ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn);
-extern void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
+extern void ksocknal_lib_reset_callback(struct socket *sock,
+					ksock_conn_t *conn);
 extern void ksocknal_lib_push_conn(ksock_conn_t *conn);
 extern int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
 extern int ksocknal_lib_setup_sock(struct socket *so);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index a1a4ac0..38c85ae 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -38,8 +38,8 @@ ksocknal_alloc_tx(int type, int size)
 		spin_lock(&ksocknal_data.ksnd_tx_lock);
 
 		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
-					    next, ksock_tx_t, tx_list);
+			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+					ksock_tx_t, tx_list);
 			LASSERT(tx->tx_desc_size == size);
 			list_del(&tx->tx_list);
 		}
@@ -91,7 +91,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 
 
 void
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(ksock_tx_t *tx)
 {
 	atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
@@ -108,7 +108,7 @@ ksocknal_free_tx (ksock_tx_t *tx)
 }
 
 static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	struct kvec *iov = tx->tx_iov;
 	int nob;
@@ -123,7 +123,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 		return rc;
 
 	nob = rc;
-	LASSERT (nob <= tx->tx_resid);
+	LASSERT(nob <= tx->tx_resid);
 	tx->tx_resid -= nob;
 
 	/* "consume" iov */
@@ -145,7 +145,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	lnet_kiov_t *kiov = tx->tx_kiov;
 	int nob;
@@ -161,7 +161,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 		return rc;
 
 	nob = rc;
-	LASSERT (nob <= tx->tx_resid);
+	LASSERT(nob <= tx->tx_resid);
 	tx->tx_resid -= nob;
 
 	/* "consume" kiov */
@@ -183,7 +183,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	int rc;
 	int bufnob;
@@ -197,7 +197,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 
 	rc = ksocknal_connsock_addref(conn);
 	if (rc != 0) {
-		LASSERT (conn->ksnc_closing);
+		LASSERT(conn->ksnc_closing);
 		return -ESHUTDOWN;
 	}
 
@@ -207,9 +207,9 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 			ksocknal_data.ksnd_enomem_tx--;
 			rc = -EAGAIN;
 		} else if (tx->tx_niov != 0) {
-			rc = ksocknal_send_iov (conn, tx);
+			rc = ksocknal_send_iov(conn, tx);
 		} else {
-			rc = ksocknal_send_kiov (conn, tx);
+			rc = ksocknal_send_kiov(conn, tx);
 		}
 
 		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
@@ -228,7 +228,8 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 
 		if (rc <= 0) { /* Didn't write anything? */
 
-			if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+			/* some stacks return 0 instead of -EAGAIN */
+			if (rc == 0)
 				rc = -EAGAIN;
 
 			/* Check if EAGAIN is due to memory pressure */
@@ -239,7 +240,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 		}
 
 		/* socket's wmem_queued now includes 'rc' bytes */
-		atomic_sub (rc, &conn->ksnc_tx_nob);
+		atomic_sub(rc, &conn->ksnc_tx_nob);
 		rc = 0;
 
 	} while (tx->tx_resid != 0);
@@ -249,7 +250,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(ksock_conn_t *conn)
 {
 	struct kvec *iov = conn->ksnc_rx_iov;
 	int nob;
@@ -294,11 +295,12 @@ ksocknal_recv_iov (ksock_conn_t *conn)
 }
 
 static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(ksock_conn_t *conn)
 {
 	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
 	int nob;
 	int rc;
+
 	LASSERT(conn->ksnc_rx_nkiov > 0);
 
 	/* Never touch conn->ksnc_rx_kiov or change connection
@@ -338,7 +340,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
 }
 
 static int
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(ksock_conn_t *conn)
 {
 	/* Return 1 on success, 0 on EOF, < 0 on error.
 	 * Caller checks ksnc_rx_nob_wanted to determine
@@ -352,15 +354,15 @@ ksocknal_receive (ksock_conn_t *conn)
 
 	rc = ksocknal_connsock_addref(conn);
 	if (rc != 0) {
-		LASSERT (conn->ksnc_closing);
+		LASSERT(conn->ksnc_closing);
 		return -ESHUTDOWN;
 	}
 
 	for (;;) {
 		if (conn->ksnc_rx_niov != 0)
-			rc = ksocknal_recv_iov (conn);
+			rc = ksocknal_recv_iov(conn);
 		else
-			rc = ksocknal_recv_kiov (conn);
+			rc = ksocknal_recv_kiov(conn);
 
 		if (rc <= 0) {
 			/* error/EOF or partial receive */
@@ -386,7 +388,7 @@ ksocknal_receive (ksock_conn_t *conn)
 }
 
 void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
 {
 	lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
 	int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
@@ -399,25 +401,30 @@ ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
 	if (ni == NULL && tx->tx_conn != NULL)
 		ni = tx->tx_conn->ksnc_peer->ksnp_ni;
 
-	ksocknal_free_tx (tx);
+	ksocknal_free_tx(tx);
 	if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
-		lnet_finalize (ni, lnetmsg, rc);
+		lnet_finalize(ni, lnetmsg, rc);
 }
 
 void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
 {
 	ksock_tx_t *tx;
 
-	while (!list_empty (txlist)) {
+	while (!list_empty(txlist)) {
 		tx = list_entry(txlist->next, ksock_tx_t, tx_list);
 
 		if (error && tx->tx_lnetmsg != NULL) {
 			CNETERR("Deleting packet type %d len %d %s->%s\n",
-				le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
-				le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
-				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
-				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
+				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
+				le32_to_cpu(
+					tx->tx_lnetmsg->msg_hdr.payload_length),
+				libcfs_nid2str(
+					le64_to_cpu(
+					tx->tx_lnetmsg->msg_hdr.src_nid)),
+				libcfs_nid2str(
+					le64_to_cpu(
+					tx->tx_lnetmsg->msg_hdr.dest_nid)));
 		} else if (error) {
 			CNETERR("Deleting noop packet\n");
 		}
@@ -500,20 +507,20 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 }
 
 static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	int rc;
 
 	if (tx->tx_zc_capable && !tx->tx_zc_checked)
 		ksocknal_check_zc_req(tx);
 
-	rc = ksocknal_transmit (conn, tx);
+	rc = ksocknal_transmit(conn, tx);
 
 	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
 
 	if (tx->tx_resid == 0) {
 		/* Sent everything OK */
-		LASSERT (rc == 0);
+		LASSERT(rc == 0);
 
 		return 0;
 	}
@@ -533,13 +540,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
 		/* enomem list takes over scheduler's ref... */
-		LASSERT (conn->ksnc_tx_scheduled);
+		LASSERT(conn->ksnc_tx_scheduled);
 		list_add_tail(&conn->ksnc_tx_list,
 				  &ksocknal_data.ksnd_enomem_conns);
 		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
 						   SOCKNAL_ENOMEM_RETRY),
 				   ksocknal_data.ksnd_reaper_waketime))
-			wake_up (&ksocknal_data.ksnd_reaper_waitq);
+			wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
 		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 		return rc;
@@ -551,11 +558,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 	if (!conn->ksnc_closing) {
 		switch (rc) {
 		case -ECONNRESET:
-			LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
+			LCONSOLE_WARN(
+				      "Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
 				      &conn->ksnc_ipaddr);
 			break;
 		default:
-			LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
+			LCONSOLE_WARN(
+				      "There was an unexpected network error while writing to %pI4h: %d.\n",
 				      &conn->ksnc_ipaddr, rc);
 			break;
 		}
@@ -570,14 +579,14 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 		ksocknal_uncheck_zc_req(tx);
 
 	/* it's not an error if conn is being closed */
-	ksocknal_close_conn_and_siblings (conn,
-					  (conn->ksnc_closing) ? 0 : rc);
+	ksocknal_close_conn_and_siblings(conn,
+					 (conn->ksnc_closing) ? 0 : rc);
 
 	return rc;
 }
 
 static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(ksock_route_t *route)
 {
 
 	/* called holding write lock on ksnd_global_lock */
@@ -599,7 +608,7 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
 }
 
 void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
 {
 	ksock_route_t *route;
 
@@ -624,10 +633,10 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 	int tnob = 0;
 	int fnob = 0;
 
-	list_for_each (tmp, &peer->ksnp_conns) {
+	list_for_each(tmp, &peer->ksnp_conns) {
 		ksock_conn_t *c  = list_entry(tmp, ksock_conn_t, ksnc_list);
 		int nob = atomic_read(&c->ksnc_tx_nob) +
-                                      c->ksnc_sock->sk->sk_wmem_queued;
+				      c->ksnc_sock->sk->sk_wmem_queued;
 		int rc;
 
 		LASSERT(!c->ksnc_closing);
@@ -644,8 +653,10 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 
 		case SOCKNAL_MATCH_YES: /* typed connection */
 			if (typed == NULL || tnob > nob ||
-			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			    (tnob == nob &&
+			     *ksocknal_tunables.ksnd_round_robin &&
+			     cfs_time_after(typed->ksnc_tx_last_post,
+			     c->ksnc_tx_last_post))) {
 				typed = c;
 				tnob  = nob;
 			}
@@ -653,8 +664,10 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 
 		case SOCKNAL_MATCH_MAY: /* fallback connection */
 			if (fallback == NULL || fnob > nob ||
-			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			    (fnob == nob &&
+			     *ksocknal_tunables.ksnd_round_robin &&
+			     cfs_time_after(fallback->ksnc_tx_last_post,
+			     c->ksnc_tx_last_post))) {
 				fallback = c;
 				fnob = nob;
 			}
@@ -676,13 +689,13 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	conn->ksnc_proto->pro_pack(tx);
 
-	atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
 	ksocknal_conn_addref(conn); /* +1 ref for tx */
 	tx->tx_conn = conn;
 }
 
 void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
 {
 	ksock_sched_t *sched = conn->ksnc_scheduler;
 	ksock_msg_t *msg = &tx->tx_msg;
@@ -708,14 +721,14 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 	 *
 	 * We always expect at least 1 mapped fragment containing the
 	 * complete ksocknal message header. */
-	LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
 		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
 		(unsigned int)tx->tx_nob);
 	LASSERT(tx->tx_niov >= 1);
 	LASSERT(tx->tx_resid == tx->tx_nob);
 
-	CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
-		tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
+	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+		tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
 					       KSOCK_MSG_NOOP,
 		tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
 
@@ -743,7 +756,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
 
 		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
-			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
+			ztx = tx; /* ZC ACK piggybacked on ztx release
+				   * tx later */
 
 	} else {
 		/* It's a normal packet - can it piggback a noop zc-ack that
@@ -756,7 +770,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 	}
 
 	if (ztx != NULL) {
-		atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
 		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
 	}
 
@@ -764,10 +778,10 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
 		/* +1 ref for scheduler */
 		ksocknal_conn_addref(conn);
-		list_add_tail (&conn->ksnc_tx_list,
+		list_add_tail(&conn->ksnc_tx_list,
 				   &sched->kss_tx_conns);
 		conn->ksnc_tx_scheduled = 1;
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 
 	spin_unlock_bh(&sched->kss_lock);
@@ -775,18 +789,18 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 
 
 ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
 {
 	unsigned long now = cfs_time_current();
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each (tmp, &peer->ksnp_routes) {
-		route = list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
-		if (route->ksnr_scheduled)      /* connections being established */
+		if (route->ksnr_scheduled) /* connections being established */
 			continue;
 
 		/* all route types connected ? */
@@ -811,13 +825,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
 }
 
 ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
 {
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each (tmp, &peer->ksnp_routes) {
-		route = list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -829,7 +843,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
 }
 
 int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 {
 	ksock_peer_t *peer;
 	ksock_conn_t *conn;
@@ -845,13 +859,15 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 		read_lock(g_lock);
 		peer = ksocknal_find_peer_locked(ni, id);
 		if (peer != NULL) {
-			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
-				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+			if (ksocknal_find_connectable_route_locked(peer) ==
+			    NULL) {
+				conn = ksocknal_find_conn_locked(peer, tx,
+								 tx->tx_nonblk);
 				if (conn != NULL) {
 					/* I've got no routes that need to be
 					 * connecting and I do have an actual
 					 * connection... */
-					ksocknal_queue_tx_locked (tx, conn);
+					ksocknal_queue_tx_locked(tx, conn);
 					read_unlock(g_lock);
 					return 0;
 				}
@@ -870,7 +886,8 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 		write_unlock_bh(g_lock);
 
 		if ((id.pid & LNET_PID_USERFLAG) != 0) {
-			CERROR("Refusing to create a connection to userspace process %s\n",
+			CERROR(
+			       "Refusing to create a connection to userspace process %s\n",
 			       libcfs_id2str(id));
 			return -EHOSTUNREACH;
 		}
@@ -895,19 +912,19 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
 	if (conn != NULL) {
 		/* Connection exists; queue message on it */
-		ksocknal_queue_tx_locked (tx, conn);
+		ksocknal_queue_tx_locked(tx, conn);
 		write_unlock_bh(g_lock);
 		return 0;
 	}
 
 	if (peer->ksnp_accepting > 0 ||
-	    ksocknal_find_connecting_route_locked (peer) != NULL) {
+	    ksocknal_find_connecting_route_locked(peer) != NULL) {
 		/* the message is going to be pinned to the peer */
 		tx->tx_deadline =
 			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
 		/* Queue the message until a connection is established */
-		list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
 		write_unlock_bh(g_lock);
 		return 0;
 	}
@@ -943,8 +960,8 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 	LASSERT(payload_nob == 0 || payload_niov > 0);
 	LASSERT(payload_niov <= LNET_MAX_IOV);
 	/* payload is either all vaddrs or all pages */
-	LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-	LASSERT (!in_interrupt ());
+	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
+	LASSERT(!in_interrupt());
 
 	if (payload_iov != NULL)
 		desc_size = offsetof(ksock_tx_t,
@@ -1016,7 +1033,7 @@ ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 }
 
 void
-ksocknal_thread_fini (void)
+ksocknal_thread_fini(void)
 {
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 	ksocknal_data.ksnd_nthreads--;
@@ -1024,7 +1041,7 @@ ksocknal_thread_fini (void)
 }
 
 int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
 {
 	static char ksocknal_slop_buffer[4096];
 
@@ -1047,12 +1064,14 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 		case  KSOCK_PROTO_V2:
 		case  KSOCK_PROTO_V3:
 			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
-			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
+			conn->ksnc_rx_iov =
+					(struct kvec *)&conn->ksnc_rx_iov_space;
 			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
 
 			conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
 			conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
-			conn->ksnc_rx_iov[0].iov_len  = offsetof(ksock_msg_t, ksm_u);
+			conn->ksnc_rx_iov[0].iov_len =
+					offsetof(ksock_msg_t, ksm_u);
 			break;
 
 		case KSOCK_PROTO_V1:
@@ -1061,13 +1080,15 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 			conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
 			conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
 
-			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
-			conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+			conn->ksnc_rx_iov =
+					(struct kvec *)&conn->ksnc_rx_iov_space;
+			conn->ksnc_rx_iov[0].iov_base =
+					&conn->ksnc_msg.ksm_u.lnetmsg;
+			conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
 			break;
 
 		default:
-			LBUG ();
+			LBUG();
 		}
 		conn->ksnc_rx_niov = 1;
 
@@ -1093,10 +1114,10 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 		conn->ksnc_rx_iov[niov].iov_len  = nob;
 		niov++;
 		skipped += nob;
-		nob_to_skip -=nob;
+		nob_to_skip -= nob;
 
 	} while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
-		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
 
 	conn->ksnc_rx_niov = niov;
 	conn->ksnc_rx_kiov = NULL;
@@ -1106,13 +1127,13 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 }
 
 static int
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(ksock_conn_t *conn)
 {
 	lnet_hdr_t *lhdr;
 	lnet_process_id_t *id;
 	int rc;
 
-	LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
 
 	/* NB: sched lock NOT held */
 	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
@@ -1125,24 +1146,27 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		rc = ksocknal_receive(conn);
 
 		if (rc <= 0) {
-			LASSERT (rc != -EAGAIN);
+			LASSERT(rc != -EAGAIN);
 
 			if (rc == 0)
-				CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
+				CDEBUG(D_NET,
+				       "[%p] EOF from %s ip %pI4h:%d\n",
 				       conn,
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 				       &conn->ksnc_ipaddr,
 				       conn->ksnc_port);
 			else if (!conn->ksnc_closing)
-				CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
+				CERROR(
+				       "[%p] Error %d on read from %s ip %pI4h:%d\n",
 				       conn, rc,
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 				       &conn->ksnc_ipaddr,
 				       conn->ksnc_port);
 
 			/* it's not an error if conn is being closed */
-			ksocknal_close_conn_and_siblings (conn,
-							  (conn->ksnc_closing) ? 0 : rc);
+			ksocknal_close_conn_and_siblings(conn,
+							 (conn->ksnc_closing) ?
+							 0 : rc);
 			return (rc == 0 ? -ESHUTDOWN : rc);
 		}
 
@@ -1185,18 +1209,20 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
 			__u64 cookie = 0;
 
-			LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
+			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
 
 			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
 				cookie = conn->ksnc_msg.ksm_zc_cookies[0];
 
 			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
-					       conn->ksnc_msg.ksm_zc_cookies[1]);
+				       conn->ksnc_msg.ksm_zc_cookies[1]);
 
 			if (rc != 0) {
-				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
+				CERROR(
+				       "%s: Unknown ZC-ACK cookie: %llu, %llu\n",
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
-				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
+				       cookie,
+				       conn->ksnc_msg.ksm_zc_cookies[1]);
 				ksocknal_new_packet(conn, 0);
 				ksocknal_close_conn_and_siblings(conn, -EPROTO);
 				return rc;
@@ -1204,7 +1230,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		}
 
 		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
-			ksocknal_new_packet (conn, 0);
+			ksocknal_new_packet(conn, 0);
 			return 0;       /* NOOP is done and just return */
 		}
 
@@ -1245,14 +1271,14 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		if (rc < 0) {
 			/* I just received garbage: give up on this conn */
 			ksocknal_new_packet(conn, 0);
-			ksocknal_close_conn_and_siblings (conn, rc);
+			ksocknal_close_conn_and_siblings(conn, rc);
 			ksocknal_conn_decref(conn);
 			return -EPROTO;
 		}
 
 		/* I'm racing with ksocknal_recv() */
-		LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
-			 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
 
 		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
 			return 0;
@@ -1289,14 +1315,14 @@ ksocknal_process_receive (ksock_conn_t *conn)
 
 		if (rc != 0) {
 			ksocknal_new_packet(conn, 0);
-			ksocknal_close_conn_and_siblings (conn, rc);
+			ksocknal_close_conn_and_siblings(conn, rc);
 			return -EPROTO;
 		}
 		/* Fall through */
 
 	case SOCKNAL_RX_SLOP:
 		/* starting new packet? */
-		if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
 			return 0;       /* come back later */
 		goto again;	     /* try to finish reading slop now */
 
@@ -1310,7 +1336,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 }
 
 int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 	       unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
 	       unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
@@ -1351,8 +1377,8 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 	switch (conn->ksnc_rx_state) {
 	case SOCKNAL_RX_PARSE_WAIT:
 		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
-		wake_up (&sched->kss_waitq);
-		LASSERT (conn->ksnc_rx_ready);
+		wake_up(&sched->kss_waitq);
+		LASSERT(conn->ksnc_rx_ready);
 		break;
 
 	case SOCKNAL_RX_PARSE:
@@ -1410,7 +1436,7 @@ int ksocknal_scheduler(void *arg)
 
 		/* Ensure I progress everything semi-fairly */
 
-		if (!list_empty (&sched->kss_rx_conns)) {
+		if (!list_empty(&sched->kss_rx_conns)) {
 			conn = list_entry(sched->kss_rx_conns.next,
 					      ksock_conn_t, ksnc_rx_list);
 			list_del(&conn->ksnc_rx_list);
@@ -1443,7 +1469,7 @@ int ksocknal_scheduler(void *arg)
 				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
 			} else if (conn->ksnc_rx_ready) {
 				/* reschedule for rx */
-				list_add_tail (&conn->ksnc_rx_list,
+				list_add_tail(&conn->ksnc_rx_list,
 						   &sched->kss_rx_conns);
 			} else {
 				conn->ksnc_rx_scheduled = 0;
@@ -1454,7 +1480,7 @@ int ksocknal_scheduler(void *arg)
 			did_something = 1;
 		}
 
-		if (!list_empty (&sched->kss_tx_conns)) {
+		if (!list_empty(&sched->kss_tx_conns)) {
 			LIST_HEAD(zlist);
 
 			if (!list_empty(&sched->kss_zombie_noop_txs)) {
@@ -1465,7 +1491,7 @@ int ksocknal_scheduler(void *arg)
 
 			conn = list_entry(sched->kss_tx_conns.next,
 					      ksock_conn_t, ksnc_tx_list);
-			list_del (&conn->ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);
 
 			LASSERT(conn->ksnc_tx_scheduled);
 			LASSERT(conn->ksnc_tx_ready);
@@ -1496,7 +1522,8 @@ int ksocknal_scheduler(void *arg)
 			rc = ksocknal_process_transmit(conn, tx);
 
 			if (rc == -ENOMEM || rc == -EAGAIN) {
-				/* Incomplete send: replace tx on HEAD of tx_queue */
+				/* Incomplete send: replace tx on HEAD of
+				 * tx_queue */
 				spin_lock_bh(&sched->kss_lock);
 				list_add(&tx->tx_list,
 					     &conn->ksnc_tx_queue);
@@ -1535,7 +1562,7 @@ int ksocknal_scheduler(void *arg)
 				rc = wait_event_interruptible_exclusive(
 					sched->kss_waitq,
 					!ksocknal_sched_cansleep(sched));
-				LASSERT (rc == 0);
+				LASSERT(rc == 0);
 			} else {
 				cond_resched();
 			}
@@ -1553,7 +1580,7 @@ int ksocknal_scheduler(void *arg)
  * Add connection to kss_rx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(ksock_conn_t *conn)
 {
 	ksock_sched_t *sched;
 
@@ -1570,7 +1597,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
 
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 	spin_unlock_bh(&sched->kss_lock);
 }
@@ -1579,7 +1606,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
  * Add connection to kss_tx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
 {
 	ksock_sched_t *sched;
 
@@ -1591,20 +1618,20 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 
 	if (!conn->ksnc_tx_scheduled && /* not being progressed */
 	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
-		list_add_tail (&conn->ksnc_tx_list,
+		list_add_tail(&conn->ksnc_tx_list,
 				   &sched->kss_tx_conns);
 		conn->ksnc_tx_scheduled = 1;
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
 
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 
 	spin_unlock_bh(&sched->kss_lock);
 }
 
 static ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
 {
 	__u32 version = 0;
 
@@ -1634,8 +1661,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
 	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
 		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
 
-		CLASSERT(sizeof (lnet_magicversion_t) ==
-			 offsetof (ksock_hello_msg_t, kshm_src_nid));
+		CLASSERT(sizeof(lnet_magicversion_t) ==
+			 offsetof(ksock_hello_msg_t, kshm_src_nid));
 
 		if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
 		    hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
@@ -1646,8 +1673,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
 }
 
 int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+		    lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
 {
 	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
 	ksock_net_t *net = (ksock_net_t *)ni->ni_data;
@@ -1684,9 +1711,9 @@ ksocknal_invert_type(int type)
 }
 
 int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
-		     __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+		    ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+		    __u64 *incarnation)
 {
 	/* Return < 0	fatal error
 	 *	0	  success
@@ -1707,17 +1734,18 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 	timeout = active ? *ksocknal_tunables.ksnd_timeout :
 			    lnet_acceptor_timeout();
 
-	rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+	rc = libcfs_sock_read(sock, &hello->kshm_magic,
+			      sizeof(hello->kshm_magic), timeout);
 	if (rc != 0) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
 			rc, &conn->ksnc_ipaddr);
-		LASSERT (rc < 0);
+		LASSERT(rc < 0);
 		return rc;
 	}
 
 	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
 	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
-	    hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
 		/* Unexpected magic! */
 		CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
 		       __cpu_to_le32 (hello->kshm_magic),
@@ -1772,7 +1800,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 	*incarnation = hello->kshm_src_incarnation;
 
 	if (hello->kshm_src_nid == LNET_NID_ANY) {
-		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
+		CERROR(
+		       "Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
 		       &conn->ksnc_ipaddr);
 		return -EPROTO;
 	}
@@ -1781,7 +1810,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
 		/* Userspace NAL assigns peer process ID from socket */
 		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
-		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+					 conn->ksnc_ipaddr);
 	} else {
 		recv_id.nid = hello->kshm_src_nid;
 		recv_id.pid = hello->kshm_src_pid;
@@ -1804,7 +1834,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 
 	if (peerid->pid != recv_id.pid ||
 	    peerid->nid != recv_id.nid) {
-		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
+		LCONSOLE_ERROR_MSG(0x130,
+				   "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
 				   libcfs_id2str(*peerid),
 				   &conn->ksnc_ipaddr,
 				   libcfs_id2str(recv_id));
@@ -1828,7 +1859,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 }
 
 static int
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(ksock_route_t *route)
 {
 	LIST_HEAD(zombies);
 	ksock_peer_t *peer = route->ksnr_peer;
@@ -1840,7 +1871,8 @@ ksocknal_connect (ksock_route_t *route)
 	int rc = 0;
 
 	deadline = cfs_time_add(cfs_time_current(),
-				cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
+				cfs_time_seconds(
+					*ksocknal_tunables.ksnd_timeout));
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -1864,7 +1896,8 @@ ksocknal_connect (ksock_route_t *route)
 		if (peer->ksnp_accepting > 0) {
 			CDEBUG(D_NET,
 			       "peer %s(%d) already connecting to me, retry later.\n",
-			       libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+			       libcfs_nid2str(peer->ksnp_id.nid),
+			       peer->ksnp_accepting);
 			retry_later = 1;
 		}
 
@@ -1878,7 +1911,7 @@ ksocknal_connect (ksock_route_t *route)
 		} else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
 			type = SOCKLND_CONN_BULK_IN;
 		} else {
-			LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+			LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
 			type = SOCKLND_CONN_BULK_OUT;
 		}
 
@@ -1930,9 +1963,10 @@ ksocknal_connect (ksock_route_t *route)
 			 * but the race is resolved quickly usually,
 			 * so min_reconnectms should be good heuristic */
 			route->ksnr_retry_interval =
-				cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
+				cfs_time_seconds(
+				*ksocknal_tunables.ksnd_min_reconnectms)/1000;
 			route->ksnr_timeout = cfs_time_add(cfs_time_current(),
-							   route->ksnr_retry_interval);
+						   route->ksnr_retry_interval);
 		}
 
 		ksocknal_launch_connection_locked(route);
@@ -1951,12 +1985,14 @@ ksocknal_connect (ksock_route_t *route)
 	route->ksnr_retry_interval *= 2;
 	route->ksnr_retry_interval =
 		max(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+		    cfs_time_seconds(
+			    *ksocknal_tunables.ksnd_min_reconnectms)/1000);
 	route->ksnr_retry_interval =
 		min(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+		    cfs_time_seconds(
+			    *ksocknal_tunables.ksnd_max_reconnectms)/1000);
 
-	LASSERT (route->ksnr_retry_interval != 0);
+	LASSERT(route->ksnr_retry_interval != 0);
 	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
 					   route->ksnr_retry_interval);
 
@@ -1967,10 +2003,10 @@ ksocknal_connect (ksock_route_t *route)
 
 		/* ksnp_tx_queue is queued on a conn on successful
 		 * connection for V1.x and V2.x */
-		if (!list_empty (&peer->ksnp_conns)) {
+		if (!list_empty(&peer->ksnp_conns)) {
 			conn = list_entry(peer->ksnp_conns.next,
 					      ksock_conn_t, ksnc_list);
-			LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
 		}
 
 		/* take all the blocked packets while I've got the lock and
@@ -2106,7 +2142,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
 	now = cfs_time_current();
 
 	/* connd_routes can contain both pending and ordinary routes */
-	list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
 				 ksnr_connd_list) {
 
 		if (route->ksnr_retry_interval == 0 ||
@@ -2122,7 +2158,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
 }
 
 int
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
 {
 	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
 	ksock_connreq_t *cr;
@@ -2159,8 +2195,8 @@ ksocknal_connd (void *arg)
 
 		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
 			/* Connection accepted by the listener */
-			cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
-					    next, ksock_connreq_t, ksncr_list);
+			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+					ksock_connreq_t, ksncr_list);
 
 			list_del(&cr->ksncr_list);
 			spin_unlock_bh(connd_lock);
@@ -2182,7 +2218,7 @@ ksocknal_connd (void *arg)
 			route = ksocknal_connd_get_route_locked(&timeout);
 		}
 		if (route != NULL) {
-			list_del (&route->ksnr_connd_list);
+			list_del(&route->ksnr_connd_list);
 			ksocknal_data.ksnd_connd_connecting++;
 			spin_unlock_bh(connd_lock);
 			dropped_lock = 1;
@@ -2190,7 +2226,8 @@ ksocknal_connd (void *arg)
 			if (ksocknal_connect(route)) {
 				/* consecutive retry */
 				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
-					CWARN("massive consecutive re-connecting to %pI4h\n",
+					CWARN(
+					      "massive consecutive re-connecting to %pI4h\n",
 					      &route->ksnr_ipaddr);
 					cons_retry = 0;
 				}
@@ -2216,7 +2253,8 @@ ksocknal_connd (void *arg)
 
 		/* Nothing to do for 'timeout'  */
 		set_current_state(TASK_INTERRUPTIBLE);
-		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+					 &wait);
 		spin_unlock_bh(connd_lock);
 
 		nloops = 0;
@@ -2233,15 +2271,16 @@ ksocknal_connd (void *arg)
 }
 
 static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn(ksock_peer_t *peer)
 {
 	/* We're called with a shared lock on ksnd_global_lock */
 	ksock_conn_t *conn;
 	struct list_head *ctmp;
 
-	list_for_each (ctmp, &peer->ksnp_conns) {
+	list_for_each(ctmp, &peer->ksnp_conns) {
 		int error;
-		conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+
+		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 
 		/* Don't need the {get,put}connsock dance to deref ksnc_sock */
 		LASSERT(!conn->ksnc_closing);
@@ -2254,19 +2293,22 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 
 			switch (error) {
 			case ECONNRESET:
-				CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
+				CNETERR(
+					"A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
 					libcfs_id2str(peer->ksnp_id),
 					&conn->ksnc_ipaddr,
 					conn->ksnc_port);
 				break;
 			case ETIMEDOUT:
-				CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
+				CNETERR(
+					"A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
 					libcfs_id2str(peer->ksnp_id),
 					&conn->ksnc_ipaddr,
 					conn->ksnc_port);
 				break;
 			default:
-				CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d\n",
+				CNETERR(
+					"An unexpected network error %d occurred with %s (%pI4h:%d\n",
 					error,
 					libcfs_id2str(peer->ksnp_id),
 					&conn->ksnc_ipaddr,
@@ -2282,7 +2324,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 				     conn->ksnc_rx_deadline)) {
 			/* Timed out incomplete incoming message */
 			ksocknal_conn_addref(conn);
-			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
+			CNETERR(
+				"Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
 				libcfs_id2str(peer->ksnp_id),
 				&conn->ksnc_ipaddr,
 				conn->ksnc_port,
@@ -2299,7 +2342,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 			/* Timed out messages queued for sending or
 			 * buffered in the socket's send buffer */
 			ksocknal_conn_addref(conn);
-			CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
+			CNETERR(
+				"Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
 				libcfs_id2str(peer->ksnp_id),
 				&conn->ksnc_ipaddr,
 				conn->ksnc_port);
@@ -2318,16 +2362,16 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-	while (!list_empty (&peer->ksnp_tx_queue)) {
-		tx = list_entry (peer->ksnp_tx_queue.next,
+	while (!list_empty(&peer->ksnp_tx_queue)) {
+		tx = list_entry(peer->ksnp_tx_queue.next,
 				     ksock_tx_t, tx_list);
 
 		if (!cfs_time_aftereq(cfs_time_current(),
 				      tx->tx_deadline))
 			break;
 
-		list_del (&tx->tx_list);
-		list_add_tail (&tx->tx_list, &stale_txs);
+		list_del(&tx->tx_list);
+		list_add_tail(&tx->tx_list, &stale_txs);
 	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2342,7 +2386,8 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 	ksock_conn_t *conn;
 	ksock_tx_t *tx;
 
-	if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+	/* last_alive will be updated by create_conn */
+	if (list_empty(&peer->ksnp_conns))
 		return 0;
 
 	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
@@ -2351,7 +2396,8 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
 	    time_before(cfs_time_current(),
 			cfs_time_add(peer->ksnp_last_alive,
-				     cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
+				     cfs_time_seconds(
+				     *ksocknal_tunables.ksnd_keepalive))))
 		return 0;
 
 	if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
@@ -2397,7 +2443,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 
 
 static void
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
 {
 	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
 	ksock_peer_t *peer;
@@ -2420,12 +2466,12 @@ ksocknal_check_peer_timeouts (int idx)
 			goto again;
 		}
 
-		conn = ksocknal_find_timed_out_conn (peer);
+		conn = ksocknal_find_timed_out_conn(peer);
 
 		if (conn != NULL) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 
-			ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 
 			/* NB we won't find this one again, but we can't
 			 * just proceed with the next peer, since we dropped
@@ -2436,9 +2482,9 @@ ksocknal_check_peer_timeouts (int idx)
 
 		/* we can't process stale txs right here because we're
 		 * holding only shared lock */
-		if (!list_empty (&peer->ksnp_tx_queue)) {
+		if (!list_empty(&peer->ksnp_tx_queue)) {
 			ksock_tx_t *tx =
-				list_entry (peer->ksnp_tx_queue.next,
+				list_entry(peer->ksnp_tx_queue.next,
 						ksock_tx_t, tx_list);
 
 			if (cfs_time_aftereq(cfs_time_current(),
@@ -2483,12 +2529,13 @@ ksocknal_check_peer_timeouts (int idx)
 		spin_unlock(&peer->ksnp_lock);
 		read_unlock(&ksocknal_data.ksnd_global_lock);
 
-		CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
+		CERROR(
+		       "Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
 		       n, libcfs_nid2str(peer->ksnp_id.nid), tx,
 		       cfs_duration_sec(cfs_time_current() - deadline),
 		       resid, conn->ksnc_sock->sk->sk_wmem_queued);
 
-		ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 		ksocknal_conn_decref(conn);
 		goto again;
 	}
@@ -2497,7 +2544,7 @@ ksocknal_check_peer_timeouts (int idx)
 }
 
 int
-ksocknal_reaper (void *arg)
+ksocknal_reaper(void *arg)
 {
 	wait_queue_t wait;
 	ksock_conn_t *conn;
@@ -2518,11 +2565,11 @@ ksocknal_reaper (void *arg)
 
 	while (!ksocknal_data.ksnd_shuttingdown) {
 
-		if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
-			conn = list_entry (ksocknal_data. \
-					       ksnd_deathrow_conns.next,
-					       ksock_conn_t, ksnc_list);
-			list_del (&conn->ksnc_list);
+		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+			conn = list_entry(
+					ksocknal_data.ksnd_deathrow_conns.next,
+					ksock_conn_t, ksnc_list);
+			list_del(&conn->ksnc_list);
 
 			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2533,10 +2580,10 @@ ksocknal_reaper (void *arg)
 			continue;
 		}
 
-		if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
-			conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
-					       next, ksock_conn_t, ksnc_list);
-			list_del (&conn->ksnc_list);
+		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+					  ksock_conn_t, ksnc_list);
+			list_del(&conn->ksnc_list);
 
 			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2546,7 +2593,7 @@ ksocknal_reaper (void *arg)
 			continue;
 		}
 
-		if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+		if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
 			list_add(&enomem_conns,
 				     &ksocknal_data.ksnd_enomem_conns);
 			list_del_init(&ksocknal_data.ksnd_enomem_conns);
@@ -2556,10 +2603,10 @@ ksocknal_reaper (void *arg)
 
 		/* reschedule all the connections that stalled with ENOMEM... */
 		nenomem_conns = 0;
-		while (!list_empty (&enomem_conns)) {
-			conn = list_entry (enomem_conns.next,
+		while (!list_empty(&enomem_conns)) {
+			conn = list_entry(enomem_conns.next,
 					       ksock_conn_t, ksnc_tx_list);
-			list_del (&conn->ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);
 
 			sched = conn->ksnc_scheduler;
 
@@ -2596,7 +2643,7 @@ ksocknal_reaper (void *arg)
 				chunk = 1;
 
 			for (i = 0; i < chunk; i++) {
-				ksocknal_check_peer_timeouts (peer_index);
+				ksocknal_check_peer_timeouts(peer_index);
 				peer_index = (peer_index + 1) %
 					     ksocknal_data.ksnd_peer_hash_size;
 			}
@@ -2613,16 +2660,16 @@ ksocknal_reaper (void *arg)
 		ksocknal_data.ksnd_reaper_waketime =
 			cfs_time_add(cfs_time_current(), timeout);
 
-		set_current_state (TASK_INTERRUPTIBLE);
-		add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
 		if (!ksocknal_data.ksnd_shuttingdown &&
-		    list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-		    list_empty (&ksocknal_data.ksnd_zombie_conns))
+		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+		    list_empty(&ksocknal_data.ksnd_zombie_conns))
 			schedule_timeout(timeout);
 
-		set_current_state (TASK_RUNNING);
-		remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 	}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index caeb347..d8f4960 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -176,7 +176,8 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 		    nob < tx->tx_resid)
 			msg.msg_flags |= MSG_MORE;
 
-		rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
+		rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov,
+				    niov, nob);
 
 		for (i = 0; i < niov; i++)
 			kunmap(kiov[i].kiov_page);
@@ -250,7 +251,8 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
 				fragnob = sum;
 
 			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
-							   iov[i].iov_base, fragnob);
+							   iov[i].iov_base,
+							   fragnob);
 		}
 		conn->ksnc_msg.ksm_csum = saved_csum;
 	}
@@ -286,7 +288,8 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
 
 	for (nob = i = 0; i < niov; i++) {
 		if ((kiov[i].kiov_offset != 0 && i > 0) ||
-		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+		    (kiov[i].kiov_offset + kiov[i].kiov_len !=
+		     PAGE_CACHE_SIZE && i < niov - 1))
 			return NULL;
 
 		pages[i] = kiov[i].kiov_page;
@@ -358,9 +361,9 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
 			LASSERT(i < niov);
 
 			/* Dang! have to kmap again because I have nowhere to
-                         * stash the mapped address.  But by doing it while the
-                         * page is still mapped, the kernel just bumps the map
-                         * count and returns me the address it stashed. */
+			 * stash the mapped address.  But by doing it while the
+			 * page is still mapped, the kernel just bumps the map
+			 * count and returns me the address it stashed. */
 			base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
 			fragnob = kiov[i].kiov_len;
 			if (fragnob > sum)
@@ -404,7 +407,8 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
 			base = kmap(tx->tx_kiov[i].kiov_page) +
 			       tx->tx_kiov[i].kiov_offset;
 
-			csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
+			csum = ksocknal_csum(csum, base,
+					     tx->tx_kiov[i].kiov_len);
 
 			kunmap(tx->tx_kiov[i].kiov_page);
 		}
@@ -423,7 +427,8 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
 }
 
 int
-ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem,
+			       int *nagle)
 {
 	struct socket *sock = conn->ksnc_sock;
 	int len;
@@ -666,7 +671,6 @@ ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn)
 	sock->sk->sk_user_data = conn;
 	sock->sk->sk_data_ready = ksocknal_data_ready;
 	sock->sk->sk_write_space = ksocknal_write_space;
-	return;
 }
 
 void
@@ -682,8 +686,6 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
 	 * on ksnd_global_lock (to serialise with me) and NOOP if
 	 * sk_user_data is NULL. */
 	sock->sk->sk_user_data = NULL;
-
-	return ;
 }
 
 int
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
index f556388..8524630 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
@@ -71,7 +71,7 @@ static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
 	return crc32_le(crc, p, len);
 #else
 	while (len-- > 0)
-		crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
+		crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff);
 	return crc;
 #endif
 }
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 1938d6a..7d767df 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -65,7 +65,8 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
 	} else {
 		conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
 						       ksock_tx_t, tx_list);
-		LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
+		LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
+			tx->tx_msg.ksm_type);
 	}
 }
 
@@ -198,16 +199,20 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 	}
 
 	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
-		/* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+		/* NOOP tx has only one ZC-ACK cookie, can carry at least
+		 * one more */
 		if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
-			tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
+			tx->tx_msg.ksm_zc_cookies[0] =
+				tx->tx_msg.ksm_zc_cookies[1];
 			tx->tx_msg.ksm_zc_cookies[1] = cookie;
 		} else {
 			tx->tx_msg.ksm_zc_cookies[0] = cookie;
 		}
 
-		if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
-			/* not likely to carry more ACKs, skip it to simplify logic */
+		if (tx->tx_msg.ksm_zc_cookies[0] -
+		    tx->tx_msg.ksm_zc_cookies[1] > 2) {
+			/* not likely to carry more ACKs, skip it to
+			 * simplify logic */
 			ksocknal_next_tx_carrier(conn);
 		}
 
@@ -241,7 +246,8 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 		}
 
 	} else {
-		/* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
+		/* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range
+		 * of cookies */
 		if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
 		    cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
 			CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -428,7 +434,8 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
 				     &peer->ksnp_zc_req_list, tx_zc_list) {
 		__u64 c = tx->tx_msg.ksm_zc_cookies[0];
 
-		if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+		if (c == cookie1 || c == cookie2 ||
+		    (cookie1 < c && c < cookie2)) {
 			tx->tx_msg.ksm_zc_cookies[0] = 0;
 			list_del(&tx->tx_zc_list);
 			list_add(&tx->tx_zc_list, &zlist);
@@ -507,9 +514,8 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
 	if (hello->kshm_nips == 0)
 		goto out;
 
-	for (i = 0; i < (int) hello->kshm_nips; i++) {
+	for (i = 0; i < (int) hello->kshm_nips; i++)
 		hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
-	}
 
 	rc = libcfs_sock_write(sock, hello->kshm_ips,
 			       hello->kshm_nips * sizeof(__u32),
@@ -544,7 +550,8 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
 		LNET_UNLOCK();
 	}
 
-	rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
+	rc = libcfs_sock_write(sock, hello,
+			       offsetof(ksock_hello_msg_t, kshm_ips),
 			       lnet_acceptor_timeout());
 
 	if (rc != 0) {
@@ -645,7 +652,8 @@ out:
 }
 
 static int
-ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello,
+		       int timeout)
 {
 	struct socket *sock = conn->ksnc_sock;
 	int rc;
@@ -658,7 +666,8 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
 
 	rc = libcfs_sock_read(sock, &hello->kshm_src_nid,
 			      offsetof(ksock_hello_msg_t, kshm_ips) -
-				       offsetof(ksock_hello_msg_t, kshm_src_nid),
+				       offsetof(ksock_hello_msg_t,
+						kshm_src_nid),
 			      timeout);
 	if (rc != 0) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
@@ -720,7 +729,8 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
 	tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
 	tx->tx_iov[0].iov_len  = sizeof(lnet_hdr_t);
 
-	tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+	tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len +
+				    sizeof(lnet_hdr_t);
 }
 
 static void
@@ -733,14 +743,18 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx)
 
 		tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
 		tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
-		tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+		tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) +
+					    tx->tx_lnetmsg->msg_len;
 	} else {
 		LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
 
-		tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
-		tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
+		tx->tx_iov[0].iov_len = offsetof(ksock_msg_t,
+						 ksm_u.lnetmsg.ksnm_hdr);
+		tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t,
+						     ksm_u.lnetmsg.ksnm_hdr);
 	}
-	/* Don't checksum before start sending, because packet can be piggybacked with ACK */
+	/* Don't checksum before start sending, because packet can be
+	 * piggybacked with ACK */
 }
 
 static void
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH v4 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (9 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations Mike Shuey
                   ` (11 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Cleaning up more checkpatch.pl issues.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/selftest/brw_test.c  |    3 +-
 drivers/staging/lustre/lnet/selftest/conctl.c    |    4 +-
 drivers/staging/lustre/lnet/selftest/conrpc.c    |   41 ++++++++--------
 drivers/staging/lustre/lnet/selftest/conrpc.h    |    7 ++-
 drivers/staging/lustre/lnet/selftest/console.c   |   33 ++++++------
 drivers/staging/lustre/lnet/selftest/console.h   |   14 +++--
 drivers/staging/lustre/lnet/selftest/framework.c |   46 ++++++++---------
 drivers/staging/lustre/lnet/selftest/module.c    |    3 +-
 drivers/staging/lustre/lnet/selftest/ping_test.c |    1 -
 drivers/staging/lustre/lnet/selftest/rpc.c       |   29 +++++------
 drivers/staging/lustre/lnet/selftest/selftest.h  |   58 ++++++++++------------
 drivers/staging/lustre/lnet/selftest/timer.h     |    2 +-
 12 files changed, 119 insertions(+), 122 deletions(-)

diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index de11f1b..fd3383f 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -463,7 +463,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
 		npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
 
 	} else {
-		npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >>
+		      PAGE_CACHE_SHIFT;
 	}
 
 	replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 1a7870e..86cb031 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -752,7 +752,8 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
 	/* have parameter, check if parameter length is valid */
 	if (args->lstio_tes_param != NULL &&
 	    (args->lstio_tes_param_len <= 0 ||
-	     args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+	     args->lstio_tes_param_len > PAGE_CACHE_SIZE -
+	     sizeof(lstcon_test_t)))
 		return -EINVAL;
 
 	LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -925,5 +926,4 @@ out:
 
 	return rc;
 }
-
 EXPORT_SYMBOL(lstcon_ioctl_entry);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index a1a4e08..c0ad353 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -455,14 +455,13 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
 		      lstcon_session_feats_check(trans->tas_features);
 	}
 
-	CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n",
+	CDEBUG(D_NET,
+	       "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n",
 	       lstcon_rpc_trans_name(trans->tas_opc),
 	       lstcon_rpc_stat_success(stat, 0),
 	       lstcon_rpc_stat_failure(stat, 0),
 	       lstcon_rpc_stat_total(stat, 0),
 	       stat->trs_rpc_errno, stat->trs_fwk_errno);
-
-	return;
 }
 
 int
@@ -564,10 +563,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 			continue;
 		}
 
-		/* rpcs can be still not callbacked (even LNetMDUnlink is called)
-		 * because huge timeout for inaccessible network, don't make
-		 * user wait for them, just abandon them, they will be recycled
-		 * in callback */
+		/* rpcs can be still not callbacked (even LNetMDUnlink is
+		 * called) because huge timeout for inaccessible network,
+		 * don't make user wait for them, just abandon them, they
+		 * will be recycled in callback */
 
 		LASSERT(crpc->crp_status != 0);
 
@@ -591,8 +590,6 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 	       lstcon_rpc_trans_name(trans->tas_opc), count);
 
 	LIBCFS_FREE(trans, sizeof(*trans));
-
-	return;
 }
 
 int
@@ -947,9 +944,10 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
 	}
 
 	if (reply->msg_ses_feats != trans->tas_features) {
-		CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
-			 reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
-			 trans->tas_features);
+		CNETERR(
+			"Framework features %x from %s is different with features on this transaction: %x\n",
+			reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
+			trans->tas_features);
 		status = mksn_rep->mksn_status = EPROTO;
 	}
 
@@ -1077,8 +1075,6 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 
 	if (stat->trs_fwk_errno == 0)
 		stat->trs_fwk_errno = rc;
-
-	return;
 }
 
 int
@@ -1111,8 +1107,9 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 			continue;
 
 		if (rc < 0) {
-			CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
-					transop, rc);
+			CDEBUG(D_NET,
+			       "Condition error while creating RPC for transaction %d: %d\n",
+			       transop, rc);
 			break;
 		}
 
@@ -1278,7 +1275,8 @@ lstcon_rpc_pinger(void *arg)
 
 	CDEBUG(D_NET, "Ping %d nodes in session\n", count);
 
-	ptimer->stt_expires = (unsigned long)(get_seconds() + LST_PING_INTERVAL);
+	ptimer->stt_expires = (unsigned long)(get_seconds() +
+			      LST_PING_INTERVAL);
 	stt_add_timer(ptimer);
 
 	mutex_unlock(&console_session.ses_mutex);
@@ -1301,7 +1299,8 @@ lstcon_rpc_pinger_start(void)
 	}
 
 	ptimer = &console_session.ses_ping_timer;
-	ptimer->stt_expires = (unsigned long)(get_seconds() + LST_PING_INTERVAL);
+	ptimer->stt_expires = (unsigned long)(get_seconds() +
+			      LST_PING_INTERVAL);
 
 	stt_add_timer(ptimer);
 
@@ -1349,7 +1348,8 @@ lstcon_rpc_cleanup_wait(void)
 
 		mutex_unlock(&console_session.ses_mutex);
 
-		CWARN("Session is shutting down, waiting for termination of transactions\n");
+		CWARN(
+		      "Session is shutting down, waiting for termination of transactions\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(cfs_time_seconds(1));
 
@@ -1381,7 +1381,8 @@ lstcon_rpc_module_init(void)
 {
 	INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list);
 	console_session.ses_ping_timer.stt_func = lstcon_rpc_pinger;
-	console_session.ses_ping_timer.stt_data = &console_session.ses_ping_timer;
+	console_session.ses_ping_timer.stt_data =
+		&console_session.ses_ping_timer;
 
 	console_session.ses_ping = NULL;
 
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 7d33cf9..5afb679 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -54,7 +54,8 @@
 #define LST_TRANS_TIMEOUT       30
 #define LST_TRANS_MIN_TIMEOUT   3
 
-#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
+#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), \
+					LST_TRANS_TIMEOUT)
 
 #define LST_PING_INTERVAL       8
 
@@ -105,8 +106,8 @@ typedef struct lstcon_rpc_trans {
 
 #define LST_TRANS_STATQRY       0x21
 
-typedef int (* lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (* lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
+typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
+typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
 
 int  lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
 			unsigned version, lstcon_rpc_t **crpc);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index f47c8f2..ed9c38f 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -77,7 +77,8 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
 
 	LASSERT(id.nid != LNET_NID_ANY);
 
-	list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) {
+	list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx],
+			    ndl_hlink) {
 		if (ndl->ndl_node->nd_id.nid != id.nid ||
 		    ndl->ndl_node->nd_id.pid != id.pid)
 			continue;
@@ -253,9 +254,8 @@ lstcon_group_decref(lstcon_group_t *grp)
 
 	lstcon_group_drain(grp, 0);
 
-	for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+	for (i = 0; i < LST_NODE_HASHSIZE; i++)
 		LASSERT(list_empty(&grp->grp_ndl_hash[i]));
-	}
 
 	LIBCFS_FREE(grp, offsetof(lstcon_group_t,
 				  grp_ndl_hash[LST_NODE_HASHSIZE]));
@@ -308,7 +308,7 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
 {
 	list_del_init(&ndl->ndl_link);
 	lstcon_ndlink_release(ndl);
-	grp->grp_nnode --;
+	grp->grp_nnode--;
 }
 
 static void
@@ -320,13 +320,11 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
 
 	list_del(&ndl->ndl_hlink);
 	list_del(&ndl->ndl_link);
-	old->grp_nnode --;
+	old->grp_nnode--;
 
 	list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
 	list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
 	new->grp_nnode++;
-
-	return;
 }
 
 static void
@@ -527,7 +525,7 @@ lstcon_group_add(char *name)
 	lstcon_group_t *grp;
 	int rc;
 
-	rc = (lstcon_group_find(name, &grp) == 0)? -EEXIST: 0;
+	rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
 	if (rc != 0) {
 		/* find a group with same name */
 		lstcon_group_put(grp);
@@ -816,7 +814,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
 		LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
 
 	rc = copy_to_user(gents_p, gentp,
-			      sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
+			      sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
 
 	LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
 
@@ -847,7 +845,7 @@ lstcon_batch_add(char *name)
 	int i;
 	int rc;
 
-	rc = (lstcon_batch_find(name, &bat) == 0)? -EEXIST: 0;
+	rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
 	if (rc != 0) {
 		CDEBUG(D_NET, "Batch %s already exists\n", name);
 		return rc;
@@ -911,7 +909,7 @@ lstcon_batch_list(int index, int len, char *name_up)
 	list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
 		if (index-- == 0) {
 			return copy_to_user(name_up, bat->bat_name, len) ?
-			       -EFAULT: 0;
+			       -EFAULT : 0;
 		}
 	}
 
@@ -956,7 +954,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
 				  &test->tes_dst_grp->grp_ndl_list;
 
 	if (dents_up != NULL) {
-		rc = lstcon_nodes_getent((server ? srvlst: clilst),
+		rc = lstcon_nodes_getent((server ? srvlst : clilst),
 					 index_p, ndent_p, dents_up);
 		return rc;
 	}
@@ -1208,7 +1206,8 @@ again:
 
 		lstcon_rpc_trans_destroy(trans);
 		/* return if any error */
-		CDEBUG(D_NET, "Failed to add test %s, RPC error %d, framework error %d\n",
+		CDEBUG(D_NET,
+		       "Failed to add test %s, RPC error %d, framework error %d\n",
 		       transop == LST_TRANS_TSBCLIADD ? "client" : "server",
 		       lstcon_trans_stat()->trs_rpc_errno,
 		       lstcon_trans_stat()->trs_fwk_errno);
@@ -1683,7 +1682,7 @@ int
 lstcon_session_match(lst_sid_t sid)
 {
 	return (console_session.ses_id.ses_nid   == sid.ses_nid &&
-		console_session.ses_id.ses_stamp == sid.ses_stamp) ?  1: 0;
+		console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0;
 }
 
 static void
@@ -1884,7 +1883,8 @@ lstcon_session_feats_check(unsigned feats)
 	spin_unlock(&console_session.ses_rpc_lock);
 
 	if (rc != 0) {
-		CERROR("remote features %x do not match with session features %x of console\n",
+		CERROR(
+		       "remote features %x do not match with session features %x of console\n",
 		       feats, console_session.ses_features);
 	}
 
@@ -2083,9 +2083,8 @@ lstcon_console_fini(void)
 	LASSERT(list_empty(&console_session.ses_bat_list));
 	LASSERT(list_empty(&console_session.ses_trans_list));
 
-	for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+	for (i = 0; i < LST_NODE_HASHSIZE; i++)
 		LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
-	}
 
 	LIBCFS_FREE(console_session.ses_ndl_hash,
 		    sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index c4cf0ae..7638042 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -116,9 +116,11 @@ typedef struct lstcon_test {
 	int              tes_oneside;    /* one-sided test */
 	int              tes_concur;     /* concurrency */
 	int              tes_loop;       /* loop count */
-	int              tes_dist;       /* nodes distribution of target group */
+	int              tes_dist;       /* nodes distribution of target
+					  * group */
 	int              tes_span;       /* nodes span of target group */
-	int              tes_cliidx;     /* client index, used for RPC creating */
+	int              tes_cliidx;     /* client index, used for RPC
+					  * creating */
 
 	struct list_head tes_trans_list; /* transaction list */
 	lstcon_group_t   *tes_src_grp;   /* group run the test */
@@ -177,7 +179,7 @@ lstcon_trans_stat(void)
 }
 
 static inline struct list_head *
-lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
+lstcon_id2hash(lnet_process_id_t id, struct list_head *hash)
 {
 	unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
 
@@ -191,7 +193,8 @@ extern int lstcon_session_match(lst_sid_t sid);
 extern int lstcon_session_new(char *name, int key, unsigned version,
 			      int timeout, int flags, lst_sid_t *sid_up);
 extern int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
-			       lstcon_ndlist_ent_t *entp, char *name_up, int len);
+			       lstcon_ndlist_ent_t *entp, char *name_up,
+			       int len);
 extern int lstcon_session_end(void);
 extern int lstcon_session_debug(int timeout, struct list_head *result_up);
 extern int lstcon_session_feats_check(unsigned feats);
@@ -210,7 +213,8 @@ extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
 extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
 			       struct list_head *result_up);
 extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
-			     int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
+			     int *index_p, int *ndent_p,
+			     lstcon_node_ent_t *ndents_up);
 extern int lstcon_group_list(int idx, int len, char *name_up);
 extern int lstcon_batch_add(char *name);
 extern int lstcon_batch_run(char *name, int timeout,
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 7c5185a..8d948e2 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -173,7 +173,6 @@ sfw_add_session_timer(void)
 	timer->stt_expires = cfs_time_add(sn->sn_timeout,
 					  get_seconds());
 	stt_add_timer(timer);
-	return;
 }
 
 static int
@@ -203,7 +202,8 @@ sfw_deactivate_session(void)
 	sfw_batch_t *tsb;
 	sfw_test_case_t *tsc;
 
-	if (sn == NULL) return;
+	if (sn == NULL)
+		return;
 
 	LASSERT(!sn->sn_timer_active);
 
@@ -298,7 +298,6 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
 
 	if (rpc->srpc_bulk != NULL)
 		sfw_free_pages(rpc);
-	return;
 }
 
 static void
@@ -573,7 +572,8 @@ sfw_load_test(struct sfw_test_instance *tsi)
 
 	rc = srpc_service_add_buffers(svc, nbuf);
 	if (rc != 0) {
-		CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
+		CWARN(
+		      "Failed to reserve enough buffers: service %s, %d needed: %d\n",
 		      svc->sv_name, nbuf, rc);
 		/* NB: this error handler is not strictly correct, because
 		 * it may release more buffers than already allocated,
@@ -604,7 +604,6 @@ sfw_unload_test(struct sfw_test_instance *tsi)
 	 * some buffers behind, but never mind... */
 	srpc_service_remove_buffers(tsc->tsc_srv_service,
 				    sfw_test_buffers(tsi));
-	return;
 }
 
 static void
@@ -613,7 +612,8 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
 	srpc_client_rpc_t *rpc;
 	sfw_test_unit_t *tsu;
 
-	if (!tsi->tsi_is_client) goto clean;
+	if (!tsi->tsi_is_client)
+		goto clean;
 
 	tsi->tsi_ops->tso_fini(tsi);
 
@@ -638,7 +638,6 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
 clean:
 	sfw_unload_test(tsi);
 	LIBCFS_FREE(tsi, sizeof(*tsi));
-	return;
 }
 
 static void
@@ -657,7 +656,6 @@ sfw_destroy_batch(sfw_batch_t *tsb)
 	}
 
 	LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
-	return;
 }
 
 void
@@ -677,7 +675,6 @@ sfw_destroy_session(sfw_session_t *sn)
 
 	LIBCFS_FREE(sn, sizeof(*sn));
 	atomic_dec(&sfw_data.fw_nzombies);
-	return;
 }
 
 static void
@@ -722,7 +719,6 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
 	}
 
 	LBUG();
-	return;
 }
 
 static int
@@ -785,7 +781,8 @@ sfw_add_test_instance(sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
 		lnet_process_id_packed_t id;
 		int j;
 
-		dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
+		dests = page_address(bk->bk_iovs[i /
+						 SFW_ID_PER_PAGE].kiov_page);
 		LASSERT(dests != NULL);  /* my pages are within KVM always */
 		id = dests[i % SFW_ID_PER_PAGE];
 		if (msg->msg_magic != SRPC_MSG_MAGIC)
@@ -860,7 +857,6 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
 	spin_unlock(&sfw_data.fw_lock);
 
 	sfw_destroy_session(sn);
-	return;
 }
 
 static void
@@ -896,7 +892,6 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
 	}
 
 	sfw_test_unit_done(tsu);
-	return;
 }
 
 int
@@ -1019,7 +1014,7 @@ sfw_run_batch(sfw_batch_t *tsb)
 			tsu->tsu_loop = tsi->tsi_loop;
 			wi = &tsu->tsu_worker;
 			swi_init_workitem(wi, tsu, sfw_run_test,
-					  lst_sched_test[\
+					  lst_sched_test[
 					  lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
 			swi_schedule_workitem(wi);
 		}
@@ -1268,7 +1263,8 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
 
 		if (sn != NULL &&
 		    sn->sn_features != request->msg_ses_feats) {
-			CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
+			CNETERR(
+				"Features of framework RPC don't match features of current session: %x/%x\n",
 				request->msg_ses_feats, sn->sn_features);
 			reply->msg_body.reply.status = EPROTO;
 			reply->msg_body.reply.sid    = sn->sn_id;
@@ -1345,7 +1341,8 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
 	spin_lock(&sfw_data.fw_lock);
 
 	if (status != 0) {
-		CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
+		CERROR(
+		       "Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
 		       sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
 		spin_unlock(&sfw_data.fw_lock);
 		return -EIO;
@@ -1556,7 +1553,6 @@ sfw_unpack_message(srpc_msg_t *msg)
 	}
 
 	LBUG();
-	return;
 }
 
 void
@@ -1568,7 +1564,6 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
 	spin_lock(&rpc->crpc_lock);
 	srpc_abort_rpc(rpc, -EINTR);
 	spin_unlock(&rpc->crpc_lock);
-	return;
 }
 
 void
@@ -1585,7 +1580,6 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
 	srpc_post_rpc(rpc);
 
 	spin_unlock(&rpc->crpc_lock);
-	return;
 }
 
 static srpc_service_t sfw_services[] = {
@@ -1660,7 +1654,8 @@ sfw_startup(void)
 	}
 
 	if (session_timeout == 0)
-		CWARN("Zero session_timeout specified - test sessions never expire.\n");
+		CWARN(
+		      "Zero session_timeout specified - test sessions never expire.\n");
 
 	if (rpc_timeout == 0)
 		CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
@@ -1700,7 +1695,8 @@ sfw_startup(void)
 
 	for (i = 0; ; i++) {
 		sv = &sfw_services[i];
-		if (sv->sv_name == NULL) break;
+		if (sv->sv_name == NULL)
+			break;
 
 		sv->sv_bulk_ready = NULL;
 		sv->sv_handler    = sfw_handle_server_rpc;
@@ -1717,11 +1713,13 @@ sfw_startup(void)
 		}
 
 		/* about to sfw_shutdown, no need to add buffer */
-		if (error) continue;
+		if (error)
+			continue;
 
 		rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
 		if (rc != 0) {
-			CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
+			CWARN(
+			      "Failed to reserve enough buffers: service %s, %d needed: %d\n",
 			      sv->sv_name, sv->sv_wi_total, rc);
 			error = -ENOMEM;
 		}
@@ -1799,6 +1797,4 @@ sfw_shutdown(void)
 		list_del(&tsc->tsc_list);
 		LIBCFS_FREE(tsc, sizeof(*tsc));
 	}
-
-	return;
 }
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 09b8f46..1afc581 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -118,7 +118,8 @@ lnet_selftest_init(void)
 		rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
 					 nthrs, &lst_sched_test[i]);
 		if (rc != 0) {
-			CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
+			CERROR(
+			       "Failed to create CPT affinity WI scheduler %d for LST\n",
 			       i);
 			goto error;
 		}
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 1dab998..6d48e06 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -165,7 +165,6 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
 	CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
 		(unsigned)((tv.tv_sec - (unsigned)reqst->pnr_time_sec) * 1000000
 			   + (tv.tv_usec - reqst->pnr_time_usec)));
-	return;
 }
 
 static int
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 59cf01f..41a09fa 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -118,7 +118,6 @@ srpc_free_bulk(srpc_bulk_t *bk)
 	}
 
 	LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
-	return;
 }
 
 srpc_bulk_t *
@@ -145,7 +144,8 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 		struct page *pg;
 		int nob;
 
-		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
+		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(),
+							  cpt),
 				      GFP_IOFS, 0);
 		if (pg == NULL) {
 			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
@@ -680,7 +680,8 @@ srpc_finish_service(struct srpc_service *sv)
 
 		rpc = list_entry(scd->scd_rpc_active.next,
 				     struct srpc_server_rpc, srpc_list);
-		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
+		CNETERR(
+			"Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
 			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
 			swi_state2str(rpc->srpc_wi.swi_state),
 			rpc->srpc_wi.swi_workitem.wi_scheduled,
@@ -952,7 +953,6 @@ srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
 	}
 
 	spin_unlock(&scd->scd_lock);
-	return;
 }
 
 /* handles an incoming RPC */
@@ -1103,7 +1103,6 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
 	timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
 					  get_seconds());
 	stt_add_timer(timer);
-	return;
 }
 
 /*
@@ -1166,7 +1165,6 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
 	spin_unlock(&rpc->crpc_lock);
 
 	(*rpc->crpc_done)(rpc);
-	return;
 }
 
 /* sends an outgoing RPC */
@@ -1244,7 +1242,8 @@ srpc_send_rpc(swi_workitem_t *wi)
 		if (reply->msg_type != type ||
 		    (reply->msg_magic != SRPC_MSG_MAGIC &&
 		     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
-			CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
+			CWARN(
+			      "Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
 			      libcfs_id2str(rpc->crpc_dest),
 			      reply->msg_type, type,
 			      reply->msg_magic, SRPC_MSG_MAGIC);
@@ -1253,7 +1252,8 @@ srpc_send_rpc(swi_workitem_t *wi)
 		}
 
 		if (do_bulk && reply->msg_body.reply.status != 0) {
-			CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
+			CWARN(
+			      "Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
 			      reply->msg_body.reply.status,
 			      libcfs_id2str(rpc->crpc_dest));
 			LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
@@ -1336,7 +1336,6 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
 	rpc->crpc_aborted = 1;
 	rpc->crpc_status  = why;
 	swi_schedule_workitem(&rpc->crpc_wi);
-	return;
 }
 
 /* called with rpc->crpc_lock held */
@@ -1352,7 +1351,6 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
 
 	srpc_add_client_rpc_timer(rpc);
 	swi_schedule_workitem(&rpc->crpc_wi);
-	return;
 }
 
 
@@ -1440,11 +1438,13 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
 		if (rpcev != &crpc->crpc_reqstev &&
 		    rpcev != &crpc->crpc_replyev &&
 		    rpcev != &crpc->crpc_bulkev) {
-			CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
+			CERROR(
+			       "rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
 			       rpcev, crpc, &crpc->crpc_reqstev,
 			       &crpc->crpc_replyev, &crpc->crpc_bulkev);
 			CERROR("Bad event: status %d, type %d, lnet %d\n",
-			       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
+			       rpcev->ev_status, rpcev->ev_type,
+			       rpcev->ev_lnet);
 			LBUG();
 		}
 
@@ -1511,7 +1511,8 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
 		     msg->msg_type != __swab32(type)) ||
 		    (msg->msg_magic != SRPC_MSG_MAGIC &&
 		     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
-			CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
+			CERROR(
+			       "Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
 			       sv->sv_name, libcfs_id2str(ev->initiator),
 			       ev->status, ev->mlength,
 			       msg->msg_type, msg->msg_magic);
@@ -1668,6 +1669,4 @@ srpc_shutdown(void)
 	case SRPC_STATE_NI_INIT:
 		LNetNIFini();
 	}
-
-	return;
 }
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 7939e4e..7a2b3d1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -95,11 +95,11 @@ struct sfw_test_instance;
 #define SRPC_RDMA_PORTAL              52
 
 static inline srpc_msg_type_t
-srpc_service2request (int service)
+srpc_service2request(int service)
 {
 	switch (service) {
 	default:
-		LBUG ();
+		LBUG();
 	case SRPC_SERVICE_DEBUG:
 		return SRPC_MSG_DEBUG_REQST;
 
@@ -130,7 +130,7 @@ srpc_service2request (int service)
 }
 
 static inline srpc_msg_type_t
-srpc_service2reply (int service)
+srpc_service2reply(int service)
 {
 	return srpc_service2request(service) + 1;
 }
@@ -319,8 +319,8 @@ typedef struct srpc_service {
 	 * - sv_handler: process incoming RPC request
 	 * - sv_bulk_ready: notify bulk data
 	 */
-	int                     (*sv_handler) (srpc_server_rpc_t *);
-	int                     (*sv_bulk_ready) (srpc_server_rpc_t *, int);
+	int                     (*sv_handler)(srpc_server_rpc_t *);
+	int                     (*sv_bulk_ready)(srpc_server_rpc_t *, int);
 } srpc_service_t;
 
 typedef struct {
@@ -416,7 +416,7 @@ typedef struct sfw_test_case {
 srpc_client_rpc_t *
 sfw_create_rpc(lnet_process_id_t peer, int service,
 	       unsigned features, int nbulkiov, int bulklen,
-	       void (*done) (srpc_client_rpc_t *), void *priv);
+	       void (*done)(srpc_client_rpc_t *), void *priv);
 int sfw_create_test_rpc(sfw_test_unit_t *tsu,
 			lnet_process_id_t peer, unsigned features,
 			int nblk, int blklen, srpc_client_rpc_t **rpc);
@@ -428,7 +428,7 @@ void sfw_free_pages(srpc_server_rpc_t *rpc);
 void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
 int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
 		    int sink);
-int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
 
 srpc_client_rpc_t *
 srpc_create_client_rpc(lnet_process_id_t peer, int service,
@@ -504,26 +504,23 @@ void sfw_shutdown(void);
 void srpc_shutdown(void);
 
 static inline void
-srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
 {
 	LASSERT(rpc != NULL);
 	LASSERT(!srpc_event_pending(rpc));
 	LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
 
-	if (rpc->crpc_fini == NULL) {
+	if (rpc->crpc_fini == NULL)
 		LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
-	} else {
+	else
 		(*rpc->crpc_fini) (rpc);
-	}
-
-	return;
 }
 
 static inline void
-srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
-		      int service, int nbulkiov, int bulklen,
-		      void (*rpc_done)(srpc_client_rpc_t *),
-		      void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+		     int service, int nbulkiov, int bulklen,
+		     void (*rpc_done)(srpc_client_rpc_t *),
+		     void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
 {
 	LASSERT(nbulkiov <= LNET_MAX_IOV);
 
@@ -555,24 +552,23 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
 	rpc->crpc_reqstmsg.msg_magic   = SRPC_MSG_MAGIC;
 	rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
 	rpc->crpc_reqstmsg.msg_type    = srpc_service2request(service);
-	return;
 }
 
 static inline const char *
-swi_state2str (int state)
+swi_state2str(int state)
 {
 #define STATE2STR(x) case x: return #x
-	switch(state) {
-		default:
-			LBUG();
-		STATE2STR(SWI_STATE_NEWBORN);
-		STATE2STR(SWI_STATE_REPLY_SUBMITTED);
-		STATE2STR(SWI_STATE_REPLY_SENT);
-		STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
-		STATE2STR(SWI_STATE_REQUEST_SENT);
-		STATE2STR(SWI_STATE_REPLY_RECEIVED);
-		STATE2STR(SWI_STATE_BULK_STARTED);
-		STATE2STR(SWI_STATE_DONE);
+	switch (state) {
+	default:
+		LBUG();
+	STATE2STR(SWI_STATE_NEWBORN);
+	STATE2STR(SWI_STATE_REPLY_SUBMITTED);
+	STATE2STR(SWI_STATE_REPLY_SENT);
+	STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
+	STATE2STR(SWI_STATE_REQUEST_SENT);
+	STATE2STR(SWI_STATE_REPLY_RECEIVED);
+	STATE2STR(SWI_STATE_BULK_STARTED);
+	STATE2STR(SWI_STATE_DONE);
 	}
 #undef STATE2STR
 }
@@ -607,7 +603,7 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
 
 	while (srpc_finish_service(sv) == 0) {
 		i++;
-		CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
+		CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
 			"Waiting for %s service to shutdown...\n",
 			sv->sv_name);
 		selftest_wait_events();
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index 2a8803d..e2b1a57 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -41,7 +41,7 @@
 typedef struct {
 	struct list_head stt_list;
 	unsigned long    stt_expires;
-	void             (*stt_func) (void *);
+	void             (*stt_func)(void *);
 	void             *stt_data;
 } stt_timer_t;
 
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (10 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH v4 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks Mike Shuey
                   ` (10 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Unify variable declarations to use a single space, and any other obvious
spacing flaws.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/acceptor.c    |   32 ++--
 drivers/staging/lustre/lnet/lnet/api-ni.c      |  198 ++++++++++----------
 drivers/staging/lustre/lnet/lnet/config.c      |  238 ++++++++++++------------
 drivers/staging/lustre/lnet/lnet/lib-eq.c      |   42 ++--
 drivers/staging/lustre/lnet/lnet/lib-md.c      |   26 ++--
 drivers/staging/lustre/lnet/lnet/lib-me.c      |   20 +-
 drivers/staging/lustre/lnet/lnet/lib-move.c    |  240 ++++++++++++------------
 drivers/staging/lustre/lnet/lnet/lib-msg.c     |   50 +++---
 drivers/staging/lustre/lnet/lnet/lib-ptl.c     |  116 ++++++------
 drivers/staging/lustre/lnet/lnet/lo.c          |    2 +-
 drivers/staging/lustre/lnet/lnet/module.c      |    8 +-
 drivers/staging/lustre/lnet/lnet/peer.c        |   50 +++---
 drivers/staging/lustre/lnet/lnet/router.c      |  224 +++++++++++-----------
 drivers/staging/lustre/lnet/lnet/router_proc.c |  210 +++++++++++-----------
 14 files changed, 728 insertions(+), 728 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 72fd1bf..69d4b19 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -143,10 +143,10 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
 	    __u32 local_ip, __u32 peer_ip, int peer_port)
 {
 	lnet_acceptor_connreq_t cr;
-	struct socket	   *sock;
-	int		     rc;
-	int		     port;
-	int		     fatal;
+	struct socket *sock;
+	int rc;
+	int port;
+	int fatal;
 
 	CLASSERT(sizeof(cr) <= 16);	    /* not too big to be on the stack */
 
@@ -211,12 +211,12 @@ static int
 lnet_accept(struct socket *sock, __u32 magic)
 {
 	lnet_acceptor_connreq_t cr;
-	__u32		   peer_ip;
-	int		     peer_port;
-	int		     rc;
-	int		     flip;
-	lnet_ni_t	      *ni;
-	char		   *str;
+	__u32 peer_ip;
+	int peer_port;
+	int rc;
+	int flip;
+	lnet_ni_t *ni;
+	char *str;
 
 	LASSERT(sizeof(cr) <= 16);	     /* not too big for the stack */
 
@@ -333,11 +333,11 @@ static int
 lnet_acceptor(void *arg)
 {
 	struct socket *newsock;
-	int	    rc;
-	__u32	  magic;
-	__u32	  peer_ip;
-	int	    peer_port;
-	int	    secure = (int)((long_ptr_t)arg);
+	int rc;
+	__u32 magic;
+	__u32 peer_ip;
+	int peer_port;
+	int secure = (int)((long_ptr_t)arg);
 
 	LASSERT(lnet_acceptor_state.pta_sock == NULL);
 
@@ -444,7 +444,7 @@ accept2secure(const char *acc, long *sec)
 int
 lnet_acceptor_start(void)
 {
-	int  rc;
+	int rc;
 	long rc2;
 	long secure;
 
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 4a14e51..6910f56 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -41,7 +41,7 @@
 
 #define D_LNI D_CONSOLE
 
-lnet_t      the_lnet;			   /* THE state of the network */
+lnet_t the_lnet;			   /* THE state of the network */
 EXPORT_SYMBOL(the_lnet);
 
 
@@ -70,8 +70,8 @@ lnet_get_routes(void)
 static char *
 lnet_get_networks(void)
 {
-	char   *nets;
-	int     rc;
+	char *nets;
+	int rc;
 
 	if (*networks != 0 && *ip2nets != 0) {
 		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
@@ -107,8 +107,8 @@ lnet_fini_locks(void)
 static int
 lnet_create_remote_nets_table(void)
 {
-	int		i;
-	struct list_head	*hash;
+	int i;
+	struct list_head *hash;
 
 	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
 	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
@@ -273,8 +273,8 @@ static void lnet_assert_wire_constants(void)
 static lnd_t *
 lnet_find_lnd_by_type(int type)
 {
-	lnd_t	      *lnd;
-	struct list_head	 *tmp;
+	lnd_t *lnd;
+	struct list_head *tmp;
 
 	/* holding lnd mutex */
 	list_for_each(tmp, &the_lnet.ln_lnds) {
@@ -325,7 +325,7 @@ void
 lnet_counters_get(lnet_counters_t *counters)
 {
 	lnet_counters_t *ctr;
-	int		i;
+	int i;
 
 	memset(counters, 0, sizeof(*counters));
 
@@ -353,7 +353,7 @@ void
 lnet_counters_reset(void)
 {
 	lnet_counters_t *counters;
-	int		i;
+	int i;
 
 	lnet_net_lock(LNET_LOCK_EX);
 
@@ -396,8 +396,8 @@ lnet_freelist_init(lnet_freelist_t *fl, int n, int size)
 void
 lnet_freelist_fini(lnet_freelist_t *fl)
 {
-	struct list_head       *el;
-	int	       count;
+	struct list_head *el;
+	int count;
 
 	if (fl->fl_nobjs == 0)
 		return;
@@ -441,7 +441,7 @@ lnet_res_type2str(int type)
 static void
 lnet_res_container_cleanup(struct lnet_res_container *rec)
 {
-	int	count = 0;
+	int count = 0;
 
 	if (rec->rec_type == 0) /* not set yet, it's uninitialized */
 		return;
@@ -486,8 +486,8 @@ static int
 lnet_res_container_setup(struct lnet_res_container *rec,
 			 int cpt, int type, int objnum, int objsz)
 {
-	int	rc = 0;
-	int	i;
+	int rc = 0;
+	int i;
 
 	LASSERT(rec->rec_type == 0);
 
@@ -525,8 +525,8 @@ out:
 static void
 lnet_res_containers_destroy(struct lnet_res_container **recs)
 {
-	struct lnet_res_container	*rec;
-	int				i;
+	struct lnet_res_container *rec;
+	int i;
 
 	cfs_percpt_for_each(rec, i, recs)
 		lnet_res_container_cleanup(rec);
@@ -537,10 +537,10 @@ lnet_res_containers_destroy(struct lnet_res_container **recs)
 static struct lnet_res_container **
 lnet_res_containers_create(int type, int objnum, int objsz)
 {
-	struct lnet_res_container	**recs;
-	struct lnet_res_container	*rec;
-	int				rc;
-	int				i;
+	struct lnet_res_container **recs;
+	struct lnet_res_container *rec;
+	int rc;
+	int i;
 
 	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
 	if (recs == NULL) {
@@ -564,9 +564,9 @@ lnet_libhandle_t *
 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
 {
 	/* ALWAYS called with lnet_res_lock held */
-	struct list_head		*head;
-	lnet_libhandle_t	*lh;
-	unsigned int		hash;
+	struct list_head *head;
+	lnet_libhandle_t *lh;
+	unsigned int hash;
 
 	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
 		return NULL;
@@ -586,8 +586,8 @@ void
 lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
 {
 	/* ALWAYS called with lnet_res_lock held */
-	unsigned int	ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
-	unsigned int	hash;
+	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
+	unsigned int hash;
 
 	lh->lh_cookie = rec->rec_lh_cookie;
 	rec->rec_lh_cookie += 1 << ibits;
@@ -605,7 +605,7 @@ lnet_prepare(lnet_pid_t requested_pid)
 {
 	/* Prepare to bring up the network */
 	struct lnet_res_container **recs;
-	int			  rc = 0;
+	int rc = 0;
 
 	LASSERT(the_lnet.ln_refcount == 0);
 
@@ -725,8 +725,8 @@ lnet_unprepare(void)
 lnet_ni_t  *
 lnet_net2ni_locked(__u32 net, int cpt)
 {
-	struct list_head	*tmp;
-	lnet_ni_t	*ni;
+	struct list_head *tmp;
+	lnet_ni_t *ni;
 
 	LASSERT(cpt != LNET_LOCK_EX);
 
@@ -758,8 +758,8 @@ EXPORT_SYMBOL(lnet_net2ni);
 static unsigned int
 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
 {
-	__u64		key = nid;
-	unsigned int	val;
+	__u64 key = nid;
+	unsigned int val;
 
 	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
 
@@ -801,8 +801,8 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid)
 int
 lnet_cpt_of_nid(lnet_nid_t nid)
 {
-	int	cpt;
-	int	cpt2;
+	int cpt;
+	int cpt2;
 
 	if (LNET_CPT_NUMBER == 1)
 		return 0; /* the only one */
@@ -821,8 +821,8 @@ EXPORT_SYMBOL(lnet_cpt_of_nid);
 int
 lnet_islocalnet(__u32 net)
 {
-	struct lnet_ni	*ni;
-	int		cpt;
+	struct lnet_ni *ni;
+	int cpt;
 
 	cpt = lnet_net_lock_current();
 
@@ -838,8 +838,8 @@ lnet_islocalnet(__u32 net)
 lnet_ni_t  *
 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
 {
-	struct lnet_ni	*ni;
-	struct list_head	*tmp;
+	struct lnet_ni *ni;
+	struct list_head *tmp;
 
 	LASSERT(cpt != LNET_LOCK_EX);
 
@@ -858,8 +858,8 @@ lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
 int
 lnet_islocalnid(lnet_nid_t nid)
 {
-	struct lnet_ni	*ni;
-	int		cpt;
+	struct lnet_ni *ni;
+	int cpt;
 
 	cpt = lnet_net_lock_current();
 	ni = lnet_nid2ni_locked(nid, cpt);
@@ -874,10 +874,10 @@ int
 lnet_count_acceptor_nis(void)
 {
 	/* Return the # of NIs that need the acceptor. */
-	int		count = 0;
-	struct list_head	*tmp;
-	struct lnet_ni	*ni;
-	int		cpt;
+	int count = 0;
+	struct list_head *tmp;
+	struct lnet_ni *ni;
+	int cpt;
 
 	cpt = lnet_net_lock_current();
 	list_for_each(tmp, &the_lnet.ln_nis) {
@@ -895,7 +895,7 @@ lnet_count_acceptor_nis(void)
 static int
 lnet_ni_tq_credits(lnet_ni_t *ni)
 {
-	int	credits;
+	int credits;
 
 	LASSERT(ni->ni_ncpts >= 1);
 
@@ -912,9 +912,9 @@ lnet_ni_tq_credits(lnet_ni_t *ni)
 static void
 lnet_shutdown_lndnis(void)
 {
-	int		i;
-	int		islo;
-	lnet_ni_t	 *ni;
+	int i;
+	int islo;
+	lnet_ni_t *ni;
 
 	/* NB called holding the global mutex */
 
@@ -968,8 +968,8 @@ lnet_shutdown_lndnis(void)
 	 * and shut them down in guaranteed thread context */
 	i = 2;
 	while (!list_empty(&the_lnet.ln_nis_zombie)) {
-		int	*ref;
-		int	j;
+		int *ref;
+		int j;
 
 		ni = list_entry(the_lnet.ln_nis_zombie.next,
 				    lnet_ni_t, ni_list);
@@ -1029,15 +1029,15 @@ lnet_shutdown_lndnis(void)
 static int
 lnet_startup_lndnis(void)
 {
-	lnd_t			*lnd;
-	struct lnet_ni		*ni;
-	struct lnet_tx_queue	*tq;
-	struct list_head		nilist;
-	int			i;
-	int		rc = 0;
-	int		lnd_type;
-	int		nicount = 0;
-	char	      *nets = lnet_get_networks();
+	lnd_t *lnd;
+	struct lnet_ni *ni;
+	struct lnet_tx_queue *tq;
+	struct list_head nilist;
+	int i;
+	int rc = 0;
+	int lnd_type;
+	int nicount = 0;
+	char *nets = lnet_get_networks();
 
 	INIT_LIST_HEAD(&nilist);
 
@@ -1181,7 +1181,7 @@ lnet_startup_lndnis(void)
 int
 LNetInit(void)
 {
-	int	rc;
+	int rc;
 
 	lnet_assert_wire_constants();
 	LASSERT(!the_lnet.ln_init);
@@ -1277,8 +1277,8 @@ EXPORT_SYMBOL(LNetFini);
 int
 LNetNIInit(lnet_pid_t requested_pid)
 {
-	int	 im_a_router = 0;
-	int	 rc;
+	int im_a_router = 0;
+	int rc;
 
 	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
 
@@ -1413,9 +1413,9 @@ int
 LNetCtl(unsigned int cmd, void *arg)
 {
 	struct libcfs_ioctl_data *data = arg;
-	lnet_process_id_t	 id = {0};
-	lnet_ni_t		*ni;
-	int		       rc;
+	lnet_process_id_t id = {0};
+	lnet_ni_t *ni;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -1531,10 +1531,10 @@ EXPORT_SYMBOL(LNetCtl);
 int
 LNetGetId(unsigned int index, lnet_process_id_t *id)
 {
-	struct lnet_ni	*ni;
-	struct list_head	*tmp;
-	int		cpt;
-	int		rc = -ENOENT;
+	struct lnet_ni *ni;
+	struct list_head *tmp;
+	int cpt;
+	int rc = -ENOENT;
 
 	LASSERT(the_lnet.ln_init);
 
@@ -1575,11 +1575,11 @@ EXPORT_SYMBOL(LNetSnprintHandle);
 static int
 lnet_create_ping_info(void)
 {
-	int	       i;
-	int	       n;
-	int	       rc;
-	unsigned int      infosz;
-	lnet_ni_t	*ni;
+	int i;
+	int n;
+	int rc;
+	unsigned int infosz;
+	lnet_ni_t *ni;
 	lnet_process_id_t id;
 	lnet_ping_info_t *pinfo;
 
@@ -1633,7 +1633,7 @@ lnet_create_ping_info(void)
 static void
 lnet_destroy_ping_info(void)
 {
-	struct lnet_ni	*ni;
+	struct lnet_ni *ni;
 
 	lnet_net_lock(0);
 
@@ -1654,12 +1654,12 @@ lnet_destroy_ping_info(void)
 int
 lnet_ping_target_init(void)
 {
-	lnet_md_t	 md = { NULL };
-	lnet_handle_me_t  meh;
+	lnet_md_t md = { NULL };
+	lnet_handle_me_t meh;
 	lnet_process_id_t id;
-	int	       rc;
-	int	       rc2;
-	int	       infosz;
+	int rc;
+	int rc2;
+	int infosz;
 
 	rc = lnet_create_ping_info();
 	if (rc != 0)
@@ -1722,11 +1722,11 @@ lnet_ping_target_init(void)
 void
 lnet_ping_target_fini(void)
 {
-	lnet_event_t    event;
-	int	     rc;
-	int	     which;
-	int	     timeout_ms = 1000;
-	sigset_t    blocked = cfs_block_allsigs();
+	lnet_event_t event;
+	int rc;
+	int which;
+	int timeout_ms = 1000;
+	sigset_t blocked = cfs_block_allsigs();
 
 	LNetMDUnlink(the_lnet.ln_ping_target_md);
 	/* NB md could be busy; this just starts the unlink */
@@ -1759,22 +1759,22 @@ lnet_ping_target_fini(void)
 int
 lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
 {
-	lnet_handle_eq_t     eqh;
-	lnet_handle_md_t     mdh;
-	lnet_event_t	 event;
-	lnet_md_t	    md = { NULL };
-	int		  which;
-	int		  unlinked = 0;
-	int		  replied = 0;
-	const int	    a_long_time = 60000; /* mS */
-	int		  infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
-	lnet_ping_info_t    *info;
-	lnet_process_id_t    tmpid;
-	int		  i;
-	int		  nob;
-	int		  rc;
-	int		  rc2;
-	sigset_t	 blocked;
+	lnet_handle_eq_t eqh;
+	lnet_handle_md_t mdh;
+	lnet_event_t event;
+	lnet_md_t md = { NULL };
+	int which;
+	int unlinked = 0;
+	int replied = 0;
+	const int a_long_time = 60000; /* mS */
+	int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+	lnet_ping_info_t *info;
+	lnet_process_id_t tmpid;
+	int i;
+	int nob;
+	int rc;
+	int rc2;
+	sigset_t blocked;
 
 	if (n_ids <= 0 ||
 	    id.nid == LNET_NID_ANY ||
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 2dc4c4a..70bc809 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -38,9 +38,9 @@
 #include "../../include/linux/lnet/lib-lnet.h"
 
 struct lnet_text_buf_t {	    /* tmp struct for parsing routes */
-	struct list_head	 ltb_list;	/* stash on lists */
-	int		ltb_size;	/* allocated size */
-	char	       ltb_text[0];     /* text buffer */
+	struct list_head ltb_list;	/* stash on lists */
+	int ltb_size;	/* allocated size */
+	char ltb_text[0];     /* text buffer */
 };
 
 static int lnet_tbnob;			/* track text buf allocation */
@@ -80,8 +80,8 @@ lnet_issep(char c)
 static int
 lnet_net_unique(__u32 net, struct list_head *nilist)
 {
-	struct list_head       *tmp;
-	lnet_ni_t	*ni;
+	struct list_head *tmp;
+	lnet_ni_t *ni;
 
 	list_for_each(tmp, nilist) {
 		ni = list_entry(tmp, lnet_ni_t, ni_list);
@@ -111,10 +111,10 @@ lnet_ni_free(struct lnet_ni *ni)
 static lnet_ni_t *
 lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
 {
-	struct lnet_tx_queue	*tq;
-	struct lnet_ni		*ni;
-	int			rc;
-	int			i;
+	struct lnet_tx_queue *tq;
+	struct lnet_ni *ni;
+	int rc;
+	int i;
 
 	if (!lnet_net_unique(net, nilist)) {
 		LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n",
@@ -178,13 +178,13 @@ int
 lnet_parse_networks(struct list_head *nilist, char *networks)
 {
 	struct cfs_expr_list *el = NULL;
-	int		tokensize = strlen(networks) + 1;
-	char		*tokens;
-	char		*str;
-	char		*tmp;
-	struct lnet_ni	*ni;
-	__u32		net;
-	int		nnets = 0;
+	int tokensize = strlen(networks) + 1;
+	char *tokens;
+	char *str;
+	char *tmp;
+	struct lnet_ni *ni;
+	__u32 net;
+	int nnets = 0;
 
 	if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
 		/* _WAY_ conservative */
@@ -210,12 +210,12 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
 		goto failed;
 
 	while (str != NULL && *str != 0) {
-		char	*comma = strchr(str, ',');
-		char	*bracket = strchr(str, '(');
-		char	*square = strchr(str, '[');
-		char	*iface;
-		int	niface;
-		int	rc;
+		char *comma = strchr(str, ',');
+		char *bracket = strchr(str, '(');
+		char *square = strchr(str, '[');
+		char *iface;
+		int niface;
+		int rc;
 
 		/* NB we don't check interface conflicts here; it's the LNDs
 		 * responsibility (if it cares at all) */
@@ -369,7 +369,7 @@ static struct lnet_text_buf_t *
 lnet_new_text_buf(int str_len)
 {
 	struct lnet_text_buf_t *ltb;
-	int	      nob;
+	int nob;
 
 	/* NB allocate space for the terminating 0 */
 	nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]);
@@ -404,7 +404,7 @@ lnet_free_text_buf(struct lnet_text_buf_t *ltb)
 static void
 lnet_free_text_bufs(struct list_head *tbs)
 {
-	struct lnet_text_buf_t  *ltb;
+	struct lnet_text_buf_t *ltb;
 
 	while (!list_empty(tbs)) {
 		ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
@@ -417,11 +417,11 @@ lnet_free_text_bufs(struct list_head *tbs)
 static int
 lnet_str2tbs_sep(struct list_head *tbs, char *str)
 {
-	struct list_head	pending;
-	char	     *sep;
-	int	       nob;
-	int	       i;
-	struct lnet_text_buf_t  *ltb;
+	struct list_head pending;
+	char *sep;
+	int nob;
+	int i;
+	struct lnet_text_buf_t *ltb;
 
 	INIT_LIST_HEAD(&pending);
 
@@ -477,8 +477,8 @@ lnet_expand1tb(struct list_head *list,
 	       char *str, char *sep1, char *sep2,
 	       char *item, int itemlen)
 {
-	int	      len1 = (int)(sep1 - str);
-	int	      len2 = strlen(sep2 + 1);
+	int len1 = (int)(sep1 - str);
+	int len2 = strlen(sep2 + 1);
 	struct lnet_text_buf_t *ltb;
 
 	LASSERT(*sep1 == '[');
@@ -500,18 +500,18 @@ lnet_expand1tb(struct list_head *list,
 static int
 lnet_str2tbs_expand(struct list_head *tbs, char *str)
 {
-	char	      num[16];
-	struct list_head	pending;
-	char	     *sep;
-	char	     *sep2;
-	char	     *parsed;
-	char	     *enditem;
-	int	       lo;
-	int	       hi;
-	int	       stride;
-	int	       i;
-	int	       nob;
-	int	       scanned;
+	char num[16];
+	struct list_head pending;
+	char *sep;
+	char *sep2;
+	char *parsed;
+	char *enditem;
+	int lo;
+	int hi;
+	int stride;
+	int i;
+	int nob;
+	int scanned;
 
 	INIT_LIST_HEAD(&pending);
 
@@ -584,8 +584,8 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
 static int
 lnet_parse_hops(char *str, unsigned int *hops)
 {
-	int     len = strlen(str);
-	int     nob = len;
+	int len = strlen(str);
+	int nob = len;
 
 	return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
 		nob == len &&
@@ -597,9 +597,9 @@ lnet_parse_hops(char *str, unsigned int *hops)
 static int
 lnet_parse_priority(char *str, unsigned int *priority, char **token)
 {
-	int   nob;
+	int nob;
 	char *sep;
-	int   len;
+	int len;
 
 	sep = strchr(str, LNET_PRIORITY_SEPARATOR);
 	if (sep == NULL) {
@@ -628,23 +628,23 @@ static int
 lnet_parse_route(char *str, int *im_a_router)
 {
 	/* static scratch buffer OK (single threaded) */
-	static char       cmd[LNET_SINGLE_TEXTBUF_NOB];
-
-	struct list_head	nets;
-	struct list_head	gateways;
-	struct list_head       *tmp1;
-	struct list_head       *tmp2;
-	__u32	     net;
-	lnet_nid_t	nid;
-	struct lnet_text_buf_t  *ltb;
-	int	       rc;
-	char	     *sep;
-	char	     *token = str;
-	int	       ntokens = 0;
-	int	       myrc = -1;
-	unsigned int      hops;
-	int	       got_hops = 0;
-	unsigned int	  priority = 0;
+	static char cmd[LNET_SINGLE_TEXTBUF_NOB];
+
+	struct list_head nets;
+	struct list_head gateways;
+	struct list_head *tmp1;
+	struct list_head *tmp2;
+	__u32 net;
+	lnet_nid_t nid;
+	struct lnet_text_buf_t *ltb;
+	int rc;
+	char *sep;
+	char *token = str;
+	int ntokens = 0;
+	int myrc = -1;
+	unsigned int hops;
+	int got_hops = 0;
+	unsigned int priority = 0;
 
 	INIT_LIST_HEAD(&gateways);
 	INIT_LIST_HEAD(&nets);
@@ -772,7 +772,7 @@ lnet_parse_route(char *str, int *im_a_router)
 static int
 lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
 {
-	struct lnet_text_buf_t   *ltb;
+	struct lnet_text_buf_t *ltb;
 
 	while (!list_empty(tbs)) {
 		ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
@@ -792,8 +792,8 @@ lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
 int
 lnet_parse_routes(char *routes, int *im_a_router)
 {
-	struct list_head	tbs;
-	int	       rc = 0;
+	struct list_head tbs;
+	int rc = 0;
 
 	*im_a_router = 0;
 
@@ -814,8 +814,8 @@ static int
 lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
 {
 	LIST_HEAD(list);
-	int		rc;
-	int		i;
+	int rc;
+	int i;
 
 	rc = cfs_ip_addr_parse(token, len, &list);
 	if (rc != 0)
@@ -834,13 +834,13 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
 {
 	static char tokens[LNET_SINGLE_TEXTBUF_NOB];
 
-	int   matched = 0;
-	int   ntokens = 0;
-	int   len;
+	int matched = 0;
+	int ntokens = 0;
+	int len;
 	char *net = NULL;
 	char *sep;
 	char *token;
-	int   rc;
+	int rc;
 
 	LASSERT(strlen(net_entry) < sizeof(tokens));
 
@@ -889,8 +889,8 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
 static __u32
 lnet_netspec2net(char *netspec)
 {
-	char   *bracket = strchr(netspec, '(');
-	__u32   net;
+	char *bracket = strchr(netspec, '(');
+	__u32 net;
 
 	if (bracket != NULL)
 		*bracket = 0;
@@ -906,15 +906,15 @@ lnet_netspec2net(char *netspec)
 static int
 lnet_splitnets(char *source, struct list_head *nets)
 {
-	int	       offset = 0;
-	int	       offset2;
-	int	       len;
-	struct lnet_text_buf_t  *tb;
-	struct lnet_text_buf_t  *tb2;
-	struct list_head       *t;
-	char	     *sep;
-	char	     *bracket;
-	__u32	     net;
+	int offset = 0;
+	int offset2;
+	int len;
+	struct lnet_text_buf_t *tb;
+	struct lnet_text_buf_t *tb2;
+	struct list_head *t;
+	char *sep;
+	char *bracket;
+	__u32 net;
 
 	LASSERT(!list_empty(nets));
 	LASSERT(nets->next == nets->prev);     /* single entry */
@@ -986,22 +986,22 @@ lnet_splitnets(char *source, struct list_head *nets)
 static int
 lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
 {
-	static char	networks[LNET_SINGLE_TEXTBUF_NOB];
-	static char	source[LNET_SINGLE_TEXTBUF_NOB];
-
-	struct list_head	  raw_entries;
-	struct list_head	  matched_nets;
-	struct list_head	  current_nets;
-	struct list_head	 *t;
-	struct list_head	 *t2;
-	struct lnet_text_buf_t    *tb;
-	struct lnet_text_buf_t    *tb2;
-	__u32	       net1;
-	__u32	       net2;
-	int		 len;
-	int		 count;
-	int		 dup;
-	int		 rc;
+	static char networks[LNET_SINGLE_TEXTBUF_NOB];
+	static char source[LNET_SINGLE_TEXTBUF_NOB];
+
+	struct list_head raw_entries;
+	struct list_head matched_nets;
+	struct list_head current_nets;
+	struct list_head *t;
+	struct list_head *t2;
+	struct lnet_text_buf_t *tb;
+	struct lnet_text_buf_t *tb2;
+	__u32 net1;
+	__u32 net2;
+	int len;
+	int count;
+	int dup;
+	int rc;
 
 	INIT_LIST_HEAD(&raw_entries);
 	if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
@@ -1112,15 +1112,15 @@ lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
 static int
 lnet_ipaddr_enumerate(__u32 **ipaddrsp)
 {
-	int	up;
-	__u32      netmask;
-	__u32     *ipaddrs;
-	__u32     *ipaddrs2;
-	int	nip;
-	char     **ifnames;
-	int	nif = libcfs_ipif_enumerate(&ifnames);
-	int	i;
-	int	rc;
+	int up;
+	__u32 netmask;
+	__u32 *ipaddrs;
+	__u32 *ipaddrs2;
+	int nip;
+	char **ifnames;
+	int nif = libcfs_ipif_enumerate(&ifnames);
+	int i;
+	int rc;
 
 	if (nif <= 0)
 		return nif;
@@ -1178,9 +1178,9 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
 int
 lnet_parse_ip2nets(char **networksp, char *ip2nets)
 {
-	__u32     *ipaddrs = NULL;
-	int	nip = lnet_ipaddr_enumerate(&ipaddrs);
-	int	rc;
+	__u32 *ipaddrs = NULL;
+	int nip = lnet_ipaddr_enumerate(&ipaddrs);
+	int rc;
 
 	if (nip < 0) {
 		LCONSOLE_ERROR_MSG(0x117,
@@ -1215,14 +1215,14 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets)
 int
 lnet_set_ip_niaddr(lnet_ni_t *ni)
 {
-	__u32  net = LNET_NIDNET(ni->ni_nid);
+	__u32 net = LNET_NIDNET(ni->ni_nid);
 	char **names;
-	int    n;
-	__u32  ip;
-	__u32  netmask;
-	int    up;
-	int    i;
-	int    rc;
+	int n;
+	__u32 ip;
+	__u32 netmask;
+	int up;
+	int i;
+	int rc;
 
 	/* Convenience for LNDs that use the IP address of a local interface as
 	 * the local address part of their NID */
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 5470148..368bde9 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -70,7 +70,7 @@ int
 LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
 	    lnet_handle_eq_t *handle)
 {
-	lnet_eq_t     *eq;
+	lnet_eq_t *eq;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -151,13 +151,13 @@ EXPORT_SYMBOL(LNetEQAlloc);
 int
 LNetEQFree(lnet_handle_eq_t eqh)
 {
-	struct lnet_eq	*eq;
-	lnet_event_t	*events = NULL;
-	int		**refs = NULL;
-	int		*ref;
-	int		rc = 0;
-	int		size = 0;
-	int		i;
+	struct lnet_eq *eq;
+	lnet_event_t *events = NULL;
+	int **refs = NULL;
+	int *ref;
+	int rc = 0;
+	int size = 0;
+	int i;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -185,9 +185,9 @@ LNetEQFree(lnet_handle_eq_t eqh)
 	}
 
 	/* stash for free after lock dropped */
-	events	= eq->eq_events;
-	size	= eq->eq_size;
-	refs	= eq->eq_refs;
+	events = eq->eq_events;
+	size = eq->eq_size;
+	refs = eq->eq_refs;
 
 	lnet_res_lh_invalidate(&eq->eq_lh);
 	list_del(&eq->eq_list);
@@ -237,9 +237,9 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
 static int
 lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
 {
-	int		new_index = eq->eq_deq_seq & (eq->eq_size - 1);
-	lnet_event_t	*new_event = &eq->eq_events[new_index];
-	int		rc;
+	int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
+	lnet_event_t *new_event = &eq->eq_events[new_index];
+	int rc;
 
 	/* must called with lnet_eq_wait_lock hold */
 	if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
@@ -323,10 +323,10 @@ static int
 lnet_eq_wait_locked(int *timeout_ms)
 __must_hold(&the_lnet.ln_eq_wait_lock)
 {
-	int		tms = *timeout_ms;
-	int		wait;
-	wait_queue_t  wl;
-	unsigned long      now;
+	int tms = *timeout_ms;
+	int wait;
+	wait_queue_t wl;
+	unsigned long now;
 
 	if (tms == 0)
 		return -1; /* don't want to wait and no new event */
@@ -392,9 +392,9 @@ int
 LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
 	   lnet_event_t *event, int *which)
 {
-	int	wait = 1;
-	int	rc;
-	int	i;
+	int wait = 1;
+	int rc;
+	int i;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 89d660f..5856c30 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -74,7 +74,7 @@ lnet_md_unlink(lnet_libmd_t *md)
 	CDEBUG(D_NET, "Unlinking md %p\n", md);
 
 	if (md->md_eq != NULL) {
-		int	cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
+		int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
 
 		LASSERT(*md->md_eq->eq_refs[cpt] > 0);
 		(*md->md_eq->eq_refs[cpt])--;
@@ -88,9 +88,9 @@ lnet_md_unlink(lnet_libmd_t *md)
 static int
 lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 {
-	int	  i;
+	int i;
 	unsigned int niov;
-	int	  total_length = 0;
+	int total_length = 0;
 
 	lmd->md_me = NULL;
 	lmd->md_start = umd->start;
@@ -268,10 +268,10 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
 {
 	LIST_HEAD(matches);
 	LIST_HEAD(drops);
-	struct lnet_me		*me;
-	struct lnet_libmd	*md;
-	int			cpt;
-	int			rc;
+	struct lnet_me *me;
+	struct lnet_libmd *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -346,9 +346,9 @@ EXPORT_SYMBOL(LNetMDAttach);
 int
 LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
 {
-	lnet_libmd_t	*md;
-	int		cpt;
-	int		rc;
+	lnet_libmd_t *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -421,9 +421,9 @@ EXPORT_SYMBOL(LNetMDBind);
 int
 LNetMDUnlink(lnet_handle_md_t mdh)
 {
-	lnet_event_t	ev;
-	lnet_libmd_t	*md;
-	int		cpt;
+	lnet_event_t ev;
+	lnet_libmd_t *md;
+	int cpt;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index a3f9292..09e9008 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -80,8 +80,8 @@ LNetMEAttach(unsigned int portal,
 	     lnet_handle_me_t *handle)
 {
 	struct lnet_match_table *mtable;
-	struct lnet_me		*me;
-	struct list_head		*head;
+	struct lnet_me *me;
+	struct list_head *head;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -151,10 +151,10 @@ LNetMEInsert(lnet_handle_me_t current_meh,
 	     lnet_unlink_t unlink, lnet_ins_pos_t pos,
 	     lnet_handle_me_t *handle)
 {
-	struct lnet_me		*current_me;
-	struct lnet_me		*new_me;
-	struct lnet_portal	*ptl;
-	int			cpt;
+	struct lnet_me *current_me;
+	struct lnet_me *new_me;
+	struct lnet_portal *ptl;
+	int cpt;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -228,10 +228,10 @@ EXPORT_SYMBOL(LNetMEInsert);
 int
 LNetMEUnlink(lnet_handle_me_t meh)
 {
-	lnet_me_t	*me;
-	lnet_libmd_t	*md;
-	lnet_event_t	ev;
-	int		cpt;
+	lnet_me_t *me;
+	lnet_libmd_t *md;
+	lnet_event_t ev;
+	int cpt;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index c2fb70e..ba59fe7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -49,10 +49,10 @@ MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
 int
 lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
 {
-	lnet_test_peer_t  *tp;
-	struct list_head	*el;
-	struct list_head	*next;
-	struct list_head	 cull;
+	lnet_test_peer_t *tp;
+	struct list_head *el;
+	struct list_head *next;
+	struct list_head cull;
 
 	LASSERT(the_lnet.ln_init);
 
@@ -103,10 +103,10 @@ static int
 fail_peer(lnet_nid_t nid, int outgoing)
 {
 	lnet_test_peer_t *tp;
-	struct list_head       *el;
-	struct list_head       *next;
-	struct list_head	cull;
-	int	       fail = 0;
+	struct list_head *el;
+	struct list_head *next;
+	struct list_head cull;
+	int fail = 0;
 
 	INIT_LIST_HEAD(&cull);
 
@@ -175,7 +175,7 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
 		   unsigned int nob)
 {
 	/* NB diov, siov are READ-ONLY */
-	unsigned int  this_nob;
+	unsigned int this_nob;
 
 	if (nob == 0)
 		return;
@@ -236,8 +236,8 @@ lnet_extract_iov(int dst_niov, struct kvec *dst,
 	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
 	 * for exactly 'len' bytes, and return the number of entries.
 	 * NB not destructive to 'src' */
-	unsigned int    frag_len;
-	unsigned int    niov;
+	unsigned int frag_len;
+	unsigned int niov;
 
 	if (len == 0)			   /* no data => */
 		return 0;		     /* no frags */
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(lnet_extract_iov);
 unsigned int
 lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
 {
-	unsigned int  nob = 0;
+	unsigned int nob = 0;
 
 	while (niov-- > 0)
 		nob += (kiov++)->kiov_len;
@@ -294,9 +294,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
 		    unsigned int nob)
 {
 	/* NB diov, siov are READ-ONLY */
-	unsigned int    this_nob;
-	char	   *daddr = NULL;
-	char	   *saddr = NULL;
+	unsigned int this_nob;
+	char *daddr = NULL;
+	char *saddr = NULL;
 
 	if (nob == 0)
 		return;
@@ -376,8 +376,8 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
 		   unsigned int kiovoffset, unsigned int nob)
 {
 	/* NB iov, kiov are READ-ONLY */
-	unsigned int    this_nob;
-	char	   *addr = NULL;
+	unsigned int this_nob;
+	char *addr = NULL;
 
 	if (nob == 0)
 		return;
@@ -447,8 +447,8 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
 		   unsigned int nob)
 {
 	/* NB kiov, iov are READ-ONLY */
-	unsigned int    this_nob;
-	char	   *addr = NULL;
+	unsigned int this_nob;
+	char *addr = NULL;
 
 	if (nob == 0)
 		return;
@@ -518,8 +518,8 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
 	/* Initialise 'dst' to the subset of 'src' starting at 'offset',
 	 * for exactly 'len' bytes, and return the number of entries.
 	 * NB not destructive to 'src' */
-	unsigned int    frag_len;
-	unsigned int    niov;
+	unsigned int frag_len;
+	unsigned int niov;
 
 	if (len == 0)			   /* no data => */
 		return 0;		     /* no frags */
@@ -565,10 +565,10 @@ static void
 lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 	     unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-	unsigned int  niov = 0;
+	unsigned int niov = 0;
 	struct kvec *iov = NULL;
-	lnet_kiov_t  *kiov = NULL;
-	int	   rc;
+	lnet_kiov_t *kiov = NULL;
+	int rc;
 
 	LASSERT(!in_interrupt());
 	LASSERT(mlen == 0 || msg != NULL);
@@ -642,8 +642,8 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
 static void
 lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	void   *priv = msg->msg_private;
-	int     rc;
+	void *priv = msg->msg_private;
+	int rc;
 
 	LASSERT(!in_interrupt());
 	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
@@ -657,7 +657,7 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	int	rc;
+	int rc;
 
 	LASSERT(!msg->msg_sending);
 	LASSERT(msg->msg_receiving);
@@ -700,7 +700,7 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 static inline int
 lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
 {
-	int	alive;
+	int alive;
 	unsigned long deadline;
 
 	LASSERT(lnet_peer_aliveness_enabled(lp));
@@ -785,10 +785,10 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
 static int
 lnet_post_send_locked(lnet_msg_t *msg, int do_send)
 {
-	lnet_peer_t		*lp = msg->msg_txpeer;
-	lnet_ni_t		*ni = lp->lp_ni;
-	int			cpt = msg->msg_tx_cpt;
-	struct lnet_tx_queue	*tq = ni->ni_tx_queues[cpt];
+	lnet_peer_t *lp = msg->msg_txpeer;
+	lnet_ni_t *ni = lp->lp_ni;
+	int cpt = msg->msg_tx_cpt;
+	struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
 
 	/* non-lnet_send() callers have checked before */
 	LASSERT(!do_send || msg->msg_tx_delayed);
@@ -871,8 +871,8 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
 static lnet_rtrbufpool_t *
 lnet_msg2bufpool(lnet_msg_t *msg)
 {
-	lnet_rtrbufpool_t	*rbp;
-	int			cpt;
+	lnet_rtrbufpool_t *rbp;
+	int cpt;
 
 	LASSERT(msg->msg_rx_committed);
 
@@ -894,9 +894,9 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
 	/* lnet_parse is going to lnet_net_unlock immediately after this, so it
 	 * sets do_recv FALSE and I don't do the unlock/send/lock bit.  I
 	 * return EAGAIN if msg blocked and 0 if received or OK to receive */
-	lnet_peer_t	 *lp = msg->msg_rxpeer;
-	lnet_rtrbufpool_t   *rbp;
-	lnet_rtrbuf_t       *rb;
+	lnet_peer_t *lp = msg->msg_rxpeer;
+	lnet_rtrbufpool_t *rbp;
+	lnet_rtrbuf_t *rb;
 
 	LASSERT(msg->msg_iov == NULL);
 	LASSERT(msg->msg_kiov == NULL);
@@ -967,11 +967,11 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
 void
 lnet_return_tx_credits_locked(lnet_msg_t *msg)
 {
-	lnet_peer_t	*txpeer = msg->msg_txpeer;
-	lnet_msg_t	*msg2;
+	lnet_peer_t *txpeer = msg->msg_txpeer;
+	lnet_msg_t *msg2;
 
 	if (msg->msg_txcredit) {
-		struct lnet_ni	     *ni = txpeer->lp_ni;
+		struct lnet_ni *ni = txpeer->lp_ni;
 		struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
 
 		/* give back NI txcredits */
@@ -1025,12 +1025,12 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
 void
 lnet_return_rx_credits_locked(lnet_msg_t *msg)
 {
-	lnet_peer_t	*rxpeer = msg->msg_rxpeer;
-	lnet_msg_t	*msg2;
+	lnet_peer_t *rxpeer = msg->msg_rxpeer;
+	lnet_msg_t *msg2;
 
 	if (msg->msg_rtrcredit) {
 		/* give back global router credits */
-		lnet_rtrbuf_t     *rb;
+		lnet_rtrbuf_t *rb;
 		lnet_rtrbufpool_t *rbp;
 
 		/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
@@ -1122,13 +1122,13 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
 static lnet_peer_t *
 lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
 {
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*rtr;
-	lnet_route_t		*rtr_best;
-	lnet_route_t		*rtr_last;
-	struct lnet_peer	*lp_best;
-	struct lnet_peer	*lp;
-	int			rc;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *rtr;
+	lnet_route_t *rtr_best;
+	lnet_route_t *rtr_last;
+	struct lnet_peer *lp_best;
+	struct lnet_peer *lp;
+	int rc;
 
 	/* If @rtr_nid is not LNET_NID_ANY, return the gateway with
 	 * rtr_nid nid, otherwise find the best gateway I can use */
@@ -1182,13 +1182,13 @@ lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
 int
 lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
 {
-	lnet_nid_t		dst_nid = msg->msg_target.nid;
-	struct lnet_ni		*src_ni;
-	struct lnet_ni		*local_ni;
-	struct lnet_peer	*lp;
-	int			cpt;
-	int			cpt2;
-	int			rc;
+	lnet_nid_t dst_nid = msg->msg_target.nid;
+	struct lnet_ni *src_ni;
+	struct lnet_ni *local_ni;
+	struct lnet_peer *lp;
+	int cpt;
+	int cpt2;
+	int rc;
 
 	/* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
 	 * but we might want to use pre-determined router for ACK/REPLY
@@ -1364,7 +1364,7 @@ lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
 static void
 lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	lnet_hdr_t	*hdr = &msg->msg_hdr;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
 
 	if (msg->msg_wanted != 0)
 		lnet_setpayloadbuffer(msg);
@@ -1383,9 +1383,9 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	lnet_hdr_t		*hdr = &msg->msg_hdr;
-	struct lnet_match_info	info;
-	int			rc;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
+	struct lnet_match_info info;
+	int rc;
 
 	/* Convert put fields to host byte order */
 	hdr->msg.put.match_bits	= le64_to_cpu(hdr->msg.put.match_bits);
@@ -1433,24 +1433,24 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
 {
-	struct lnet_match_info	info;
-	lnet_hdr_t		*hdr = &msg->msg_hdr;
-	lnet_handle_wire_t	reply_wmd;
-	int			rc;
+	struct lnet_match_info info;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
+	lnet_handle_wire_t reply_wmd;
+	int rc;
 
 	/* Convert get fields to host byte order */
-	hdr->msg.get.match_bits	  = le64_to_cpu(hdr->msg.get.match_bits);
-	hdr->msg.get.ptl_index	  = le32_to_cpu(hdr->msg.get.ptl_index);
-	hdr->msg.get.sink_length  = le32_to_cpu(hdr->msg.get.sink_length);
-	hdr->msg.get.src_offset	  = le32_to_cpu(hdr->msg.get.src_offset);
-
-	info.mi_id.nid	= hdr->src_nid;
-	info.mi_id.pid	= hdr->src_pid;
-	info.mi_opc	= LNET_MD_OP_GET;
-	info.mi_portal	= hdr->msg.get.ptl_index;
-	info.mi_rlength	= hdr->msg.get.sink_length;
-	info.mi_roffset	= hdr->msg.get.src_offset;
-	info.mi_mbits	= hdr->msg.get.match_bits;
+	hdr->msg.get.match_bits  = le64_to_cpu(hdr->msg.get.match_bits);
+	hdr->msg.get.ptl_index   = le32_to_cpu(hdr->msg.get.ptl_index);
+	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
+	hdr->msg.get.src_offset  = le32_to_cpu(hdr->msg.get.src_offset);
+
+	info.mi_id.nid  = hdr->src_nid;
+	info.mi_id.pid  = hdr->src_pid;
+	info.mi_opc     = LNET_MD_OP_GET;
+	info.mi_portal  = hdr->msg.get.ptl_index;
+	info.mi_rlength = hdr->msg.get.sink_length;
+	info.mi_roffset = hdr->msg.get.src_offset;
+	info.mi_mbits   = hdr->msg.get.match_bits;
 
 	rc = lnet_ptl_match_md(&info, msg);
 	if (rc == LNET_MATCHMD_DROP) {
@@ -1497,13 +1497,13 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
 static int
 lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	void	     *private = msg->msg_private;
-	lnet_hdr_t       *hdr = &msg->msg_hdr;
+	void *private = msg->msg_private;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
 	lnet_process_id_t src = {0};
-	lnet_libmd_t     *md;
-	int	       rlength;
-	int	       mlength;
-	int			cpt;
+	lnet_libmd_t *md;
+	int rlength;
+	int mlength;
+	int cpt;
 
 	cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
 	lnet_res_lock(cpt);
@@ -1562,10 +1562,10 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	lnet_hdr_t       *hdr = &msg->msg_hdr;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
 	lnet_process_id_t src = {0};
-	lnet_libmd_t     *md;
-	int			cpt;
+	lnet_libmd_t *md;
+	int cpt;
 
 	src.nid = hdr->src_nid;
 	src.pid = hdr->src_pid;
@@ -1612,7 +1612,7 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
 static int
 lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
 {
-	int	rc = 0;
+	int rc = 0;
 
 	if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
 	    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
@@ -1713,15 +1713,15 @@ int
 lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
 	   void *private, int rdma_req)
 {
-	int		rc = 0;
-	int		cpt;
-	int		for_me;
-	struct lnet_msg	*msg;
-	lnet_pid_t     dest_pid;
-	lnet_nid_t     dest_nid;
-	lnet_nid_t     src_nid;
-	__u32	  payload_length;
-	__u32	  type;
+	int rc = 0;
+	int cpt;
+	int for_me;
+	struct lnet_msg *msg;
+	lnet_pid_t dest_pid;
+	lnet_nid_t dest_nid;
+	lnet_nid_t src_nid;
+	__u32 payload_length;
+	__u32 type;
 
 	LASSERT(!in_interrupt());
 
@@ -1945,8 +1945,8 @@ void
 lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
 {
 	while (!list_empty(head)) {
-		lnet_process_id_t	id = {0};
-		lnet_msg_t		*msg;
+		lnet_process_id_t id = {0};
+		lnet_msg_t *msg;
 
 		msg = list_entry(head->next, lnet_msg_t, msg_list);
 		list_del(&msg->msg_list);
@@ -1986,8 +1986,8 @@ void
 lnet_recv_delayed_msg_list(struct list_head *head)
 {
 	while (!list_empty(head)) {
-		lnet_msg_t	  *msg;
-		lnet_process_id_t  id;
+		lnet_msg_t *msg;
+		lnet_process_id_t id;
 
 		msg = list_entry(head->next, lnet_msg_t, msg_list);
 		list_del(&msg->msg_list);
@@ -2063,10 +2063,10 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
 	__u64 match_bits, unsigned int offset,
 	__u64 hdr_data)
 {
-	struct lnet_msg		*msg;
-	struct lnet_libmd	*md;
-	int			cpt;
-	int			rc;
+	struct lnet_msg *msg;
+	struct lnet_libmd *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -2153,10 +2153,10 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
 	 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
 	 * lnet_finalize() is called on it, so the LND must call this first */
 
-	struct lnet_msg		*msg = lnet_msg_alloc();
-	struct lnet_libmd	*getmd = getmsg->msg_md;
-	lnet_process_id_t	peer_id = getmsg->msg_target;
-	int			cpt;
+	struct lnet_msg *msg = lnet_msg_alloc();
+	struct lnet_libmd *getmd = getmsg->msg_md;
+	lnet_process_id_t peer_id = getmsg->msg_target;
+	int cpt;
 
 	LASSERT(!getmsg->msg_target_is_router);
 	LASSERT(!getmsg->msg_routing);
@@ -2263,10 +2263,10 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
 	lnet_process_id_t target, unsigned int portal,
 	__u64 match_bits, unsigned int offset)
 {
-	struct lnet_msg		*msg;
-	struct lnet_libmd	*md;
-	int			cpt;
-	int			rc;
+	struct lnet_msg *msg;
+	struct lnet_libmd *md;
+	int cpt;
+	int rc;
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -2353,14 +2353,14 @@ EXPORT_SYMBOL(LNetGet);
 int
 LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
 {
-	struct list_head		*e;
-	struct lnet_ni		*ni;
-	lnet_remotenet_t	*rnet;
-	__u32			dstnet = LNET_NIDNET(dstnid);
-	int			hops;
-	int			cpt;
-	__u32			order = 2;
-	struct list_head		*rn_list;
+	struct list_head *e;
+	struct lnet_ni *ni;
+	lnet_remotenet_t *rnet;
+	__u32 dstnet = LNET_NIDNET(dstnid);
+	int hops;
+	int cpt;
+	__u32 order = 2;
+	struct list_head *rn_list;
 
 	/* if !local_nid_dist_zero, I don't return a distance of 0 ever
 	 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index a46ccbf..65d7595 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -60,8 +60,8 @@ lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
 void
 lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
 {
-	lnet_hdr_t	*hdr = &msg->msg_hdr;
-	lnet_event_t	*ev  = &msg->msg_ev;
+	lnet_hdr_t *hdr = &msg->msg_hdr;
+	lnet_event_t *ev  = &msg->msg_ev;
 
 	LASSERT(!msg->msg_routing);
 
@@ -73,7 +73,7 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
 		ev->target.pid    = le32_to_cpu(hdr->dest_pid);
 		ev->initiator.nid = LNET_NID_ANY;
 		ev->initiator.pid = the_lnet.ln_pid;
-		ev->sender	  = LNET_NID_ANY;
+		ev->sender        = LNET_NID_ANY;
 
 	} else {
 		/* event for passive message */
@@ -82,9 +82,9 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
 		ev->initiator.pid = hdr->src_pid;
 		ev->initiator.nid = hdr->src_nid;
 		ev->rlength       = hdr->payload_length;
-		ev->sender	  = msg->msg_from;
-		ev->mlength	  = msg->msg_wanted;
-		ev->offset	  = msg->msg_offset;
+		ev->sender        = msg->msg_from;
+		ev->mlength       = msg->msg_wanted;
+		ev->offset        = msg->msg_offset;
 	}
 
 	switch (ev_type) {
@@ -137,7 +137,7 @@ void
 lnet_msg_commit(lnet_msg_t *msg, int cpt)
 {
 	struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
-	lnet_counters_t		  *counters  = the_lnet.ln_counters[cpt];
+	lnet_counters_t *counters  = the_lnet.ln_counters[cpt];
 
 	/* routed message can be committed for both receiving and sending */
 	LASSERT(!msg->msg_tx_committed);
@@ -170,7 +170,7 @@ static void
 lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
 {
 	lnet_counters_t	*counters;
-	lnet_event_t	*ev = &msg->msg_ev;
+	lnet_event_t *ev = &msg->msg_ev;
 
 	LASSERT(msg->msg_tx_committed);
 	if (status != 0)
@@ -219,8 +219,8 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
 static void
 lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
 {
-	lnet_counters_t	*counters;
-	lnet_event_t	*ev = &msg->msg_ev;
+	lnet_counters_t *counters;
+	lnet_event_t *ev = &msg->msg_ev;
 
 	LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
 	LASSERT(msg->msg_rx_committed);
@@ -273,7 +273,7 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
 void
 lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status)
 {
-	int	cpt2 = cpt;
+	int cpt2 = cpt;
 
 	LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
 	LASSERT(msg->msg_onactivelist);
@@ -335,8 +335,8 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
 void
 lnet_msg_detach_md(lnet_msg_t *msg, int status)
 {
-	lnet_libmd_t	*md = msg->msg_md;
-	int		unlink;
+	lnet_libmd_t *md = msg->msg_md;
+	int unlink;
 
 	/* Now it's safe to drop my caller's ref */
 	md->md_refcount--;
@@ -359,8 +359,8 @@ static int
 lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
 {
 	lnet_handle_wire_t ack_wmd;
-	int		rc;
-	int		status = msg->msg_ev.status;
+	int rc;
+	int status = msg->msg_ev.status;
 
 	LASSERT(msg->msg_onactivelist);
 
@@ -434,11 +434,11 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
 void
 lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
 {
-	struct lnet_msg_container	*container;
-	int				my_slot;
-	int				cpt;
-	int				rc;
-	int				i;
+	struct lnet_msg_container *container;
+	int my_slot;
+	int cpt;
+	int rc;
+	int i;
 
 	LASSERT(!in_interrupt());
 
@@ -534,7 +534,7 @@ EXPORT_SYMBOL(lnet_finalize);
 void
 lnet_msg_container_cleanup(struct lnet_msg_container *container)
 {
-	int     count = 0;
+	int count = 0;
 
 	if (container->msc_init == 0)
 		return;
@@ -568,7 +568,7 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
 int
 lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
 {
-	int	rc;
+	int rc;
 
 	container->msc_init = 1;
 
@@ -608,7 +608,7 @@ void
 lnet_msg_containers_destroy(void)
 {
 	struct lnet_msg_container *container;
-	int     i;
+	int i;
 
 	if (the_lnet.ln_msg_containers == NULL)
 		return;
@@ -624,8 +624,8 @@ int
 lnet_msg_containers_create(void)
 {
 	struct lnet_msg_container *container;
-	int	rc;
-	int	i;
+	int rc;
+	int i;
 
 	the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
 						      sizeof(*container));
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 3ba0da9..84707c5 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -39,7 +39,7 @@
 #include "../../include/linux/lnet/lib-lnet.h"
 
 /* NB: add /proc interfaces in upcoming patches */
-int	portal_rotor	= LNET_PTL_ROTOR_HASH_RT;
+int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
 module_param(portal_rotor, int, 0644);
 MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
 
@@ -47,8 +47,8 @@ static int
 lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
 		    __u64 mbits, __u64 ignore_bits)
 {
-	struct lnet_portal	*ptl = the_lnet.ln_portals[index];
-	int			unique;
+	struct lnet_portal *ptl = the_lnet.ln_portals[index];
+	int unique;
 
 	unique = ignore_bits == 0 &&
 		 match_id.nid != LNET_NID_ANY &&
@@ -89,7 +89,7 @@ static void
 lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
 {
 	struct lnet_match_table	*mtable = ptl->ptl_mtables[cpt];
-	int			i;
+	int i;
 
 	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
 	LASSERT(lnet_ptl_is_wildcard(ptl));
@@ -114,7 +114,7 @@ static void
 lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
 {
 	struct lnet_match_table	*mtable = ptl->ptl_mtables[cpt];
-	int			i;
+	int i;
 
 	/* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
 	LASSERT(lnet_ptl_is_wildcard(ptl));
@@ -141,9 +141,9 @@ lnet_try_match_md(lnet_libmd_t *md,
 {
 	/* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
 	 * lnet_match_blocked_msg() relies on this to avoid races */
-	unsigned int	offset;
-	unsigned int	mlength;
-	lnet_me_t	*me = md->md_me;
+	unsigned int offset;
+	unsigned int mlength;
+	lnet_me_t *me = md->md_me;
 
 	/* MD exhausted */
 	if (lnet_md_exhausted(md))
@@ -227,7 +227,7 @@ struct lnet_match_table *
 lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
 		  __u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos)
 {
-	struct lnet_portal	*ptl;
+	struct lnet_portal *ptl;
 	struct lnet_match_table	*mtable;
 
 	/* NB: called w/o lock */
@@ -261,11 +261,11 @@ static struct lnet_match_table *
 lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
 {
 	struct lnet_match_table	*mtable;
-	struct lnet_portal	*ptl;
-	unsigned int		nmaps;
-	unsigned int		rotor;
-	unsigned int		cpt;
-	bool			routed;
+	struct lnet_portal *ptl;
+	unsigned int nmaps;
+	unsigned int rotor;
+	unsigned int cpt;
+	bool routed;
 
 	/* NB: called w/o lock */
 	LASSERT(info->mi_portal < the_lnet.ln_nportals);
@@ -312,8 +312,8 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
 static int
 lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
 {
-	__u64	*bmap;
-	int	i;
+	__u64 *bmap;
+	int i;
 
 	if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
 		return 0;
@@ -337,7 +337,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
 static void
 lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
 {
-	__u64	*bmap;
+	__u64 *bmap;
 
 	LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
 	LASSERT(pos <= LNET_MT_HASH_IGNORE);
@@ -373,11 +373,11 @@ int
 lnet_mt_match_md(struct lnet_match_table *mtable,
 		 struct lnet_match_info *info, struct lnet_msg *msg)
 {
-	struct list_head		*head;
-	lnet_me_t		*me;
-	lnet_me_t		*tmp;
-	int			exhausted = 0;
-	int			rc;
+	struct list_head *head;
+	lnet_me_t *me;
+	lnet_me_t *tmp;
+	int exhausted = 0;
+	int rc;
 
 	/* any ME with ignore bits? */
 	if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
@@ -428,7 +428,7 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
 static int
 lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
 {
-	int	rc;
+	int rc;
 
 	/* message arrived before any buffer posting on this portal,
 	 * simply delay or drop this message */
@@ -461,9 +461,9 @@ static int
 lnet_ptl_match_delay(struct lnet_portal *ptl,
 		     struct lnet_match_info *info, struct lnet_msg *msg)
 {
-	int	first = ptl->ptl_mt_maps[0]; /* read w/o lock */
-	int	rc = 0;
-	int	i;
+	int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
+	int rc = 0;
+	int i;
 
 	/* steal buffer from other CPTs, and delay it if nothing to steal,
 	 * this function is more expensive than a regular match, but we
@@ -472,7 +472,7 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
 
 	for (i = 0; i < LNET_CPT_NUMBER; i++) {
 		struct lnet_match_table *mtable;
-		int			cpt;
+		int cpt;
 
 		cpt = (first + i) % LNET_CPT_NUMBER;
 		mtable = ptl->ptl_mtables[cpt];
@@ -536,8 +536,8 @@ int
 lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
 {
 	struct lnet_match_table	*mtable;
-	struct lnet_portal	*ptl;
-	int			rc;
+	struct lnet_portal *ptl;
+	int rc;
 
 	CDEBUG(D_NET, "Request from %s of length %d into portal %d MB=%#llx\n",
 	       libcfs_id2str(info->mi_id), info->mi_rlength, info->mi_portal,
@@ -622,13 +622,13 @@ void
 lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 		   struct list_head *matches, struct list_head *drops)
 {
-	struct lnet_portal	*ptl = the_lnet.ln_portals[me->me_portal];
+	struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
 	struct lnet_match_table	*mtable;
-	struct list_head		*head;
-	lnet_msg_t		*tmp;
-	lnet_msg_t		*msg;
-	int			exhausted = 0;
-	int			cpt;
+	struct list_head *head;
+	lnet_msg_t *tmp;
+	lnet_msg_t *msg;
+	int exhausted = 0;
+	int cpt;
 
 	LASSERT(md->md_refcount == 0); /* a brand new MD */
 
@@ -647,20 +647,20 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 	head = &ptl->ptl_msg_stealing;
  again:
 	list_for_each_entry_safe(msg, tmp, head, msg_list) {
-		struct lnet_match_info	info;
-		lnet_hdr_t		*hdr;
-		int			rc;
+		struct lnet_match_info info;
+		lnet_hdr_t *hdr;
+		int rc;
 
 		LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
 
-		hdr   = &msg->msg_hdr;
-		info.mi_id.nid	= hdr->src_nid;
-		info.mi_id.pid	= hdr->src_pid;
-		info.mi_opc	= LNET_MD_OP_PUT;
-		info.mi_portal	= hdr->msg.put.ptl_index;
-		info.mi_rlength	= hdr->payload_length;
-		info.mi_roffset	= hdr->msg.put.offset;
-		info.mi_mbits	= hdr->msg.put.match_bits;
+		hdr = &msg->msg_hdr;
+		info.mi_id.nid  = hdr->src_nid;
+		info.mi_id.pid  = hdr->src_pid;
+		info.mi_opc     = LNET_MD_OP_PUT;
+		info.mi_portal  = hdr->msg.put.ptl_index;
+		info.mi_rlength = hdr->payload_length;
+		info.mi_roffset = hdr->msg.put.offset;
+		info.mi_mbits   = hdr->msg.put.match_bits;
 
 		rc = lnet_try_match_md(md, &info, msg);
 
@@ -715,7 +715,7 @@ static void
 lnet_ptl_cleanup(struct lnet_portal *ptl)
 {
 	struct lnet_match_table	*mtable;
-	int			i;
+	int i;
 
 	if (ptl->ptl_mtables == NULL) /* uninitialized portal */
 		return;
@@ -723,9 +723,9 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
 	LASSERT(list_empty(&ptl->ptl_msg_delayed));
 	LASSERT(list_empty(&ptl->ptl_msg_stealing));
 	cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
-		struct list_head	*mhash;
-		lnet_me_t	*me;
-		int		j;
+		struct list_head *mhash;
+		lnet_me_t *me;
+		int j;
 
 		if (mtable->mt_mhash == NULL) /* uninitialized match-table */
 			continue;
@@ -753,9 +753,9 @@ static int
 lnet_ptl_setup(struct lnet_portal *ptl, int index)
 {
 	struct lnet_match_table	*mtable;
-	struct list_head		*mhash;
-	int			i;
-	int			j;
+	struct list_head *mhash;
+	int i;
+	int j;
 
 	ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
 					    sizeof(struct lnet_match_table));
@@ -798,7 +798,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
 void
 lnet_portals_destroy(void)
 {
-	int	i;
+	int i;
 
 	if (the_lnet.ln_portals == NULL)
 		return;
@@ -813,8 +813,8 @@ lnet_portals_destroy(void)
 int
 lnet_portals_create(void)
 {
-	int	size;
-	int	i;
+	int size;
+	int i;
 
 	size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]);
 
@@ -898,8 +898,8 @@ EXPORT_SYMBOL(LNetSetLazyPortal);
 int
 LNetClearLazyPortal(int portal)
 {
-	struct lnet_portal	*ptl;
-	LIST_HEAD		(zombies);
+	struct lnet_portal *ptl;
+	LIST_HEAD(zombies);
 
 	if (portal < 0 || portal >= the_lnet.ln_nportals)
 		return -EINVAL;
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index f708c2e..2a137f4 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -111,7 +111,7 @@ lnd_t the_lolnd = {
 	/* .lnd_type       = */ LOLND,
 	/* .lnd_startup    = */ lolnd_startup,
 	/* .lnd_shutdown   = */ lolnd_shutdown,
-	/* .lnt_ctl	= */ NULL,
+	/* .lnt_ctl        = */ NULL,
 	/* .lnd_send       = */ lolnd_send,
 	/* .lnd_recv       = */ lolnd_recv,
 	/* .lnd_eager_recv = */ NULL,
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 72b7fbc..ff3f831 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -47,7 +47,7 @@ static int
 lnet_configure(void *arg)
 {
 	/* 'arg' only there so I can be passed to cfs_create_thread() */
-	int    rc = 0;
+	int rc = 0;
 
 	LNET_MUTEX_LOCK(&lnet_config_mutex);
 
@@ -66,7 +66,7 @@ lnet_configure(void *arg)
 static int
 lnet_unconfigure(void)
 {
-	int   refcount;
+	int refcount;
 
 	LNET_MUTEX_LOCK(&lnet_config_mutex);
 
@@ -86,7 +86,7 @@ lnet_unconfigure(void)
 static int
 lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
 {
-	int   rc;
+	int rc;
 
 	switch (cmd) {
 	case IOC_LIBCFS_CONFIGURE:
@@ -113,7 +113,7 @@ static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
 static int __init
 init_lnet(void)
 {
-	int		  rc;
+	int rc;
 
 	mutex_init(&lnet_config_mutex);
 
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 45b5742..1fceed3 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -43,10 +43,10 @@
 int
 lnet_peer_tables_create(void)
 {
-	struct lnet_peer_table	*ptable;
-	struct list_head		*hash;
-	int			i;
-	int			j;
+	struct lnet_peer_table *ptable;
+	struct list_head *hash;
+	int i;
+	int j;
 
 	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
 						   sizeof(*ptable));
@@ -77,10 +77,10 @@ lnet_peer_tables_create(void)
 void
 lnet_peer_tables_destroy(void)
 {
-	struct lnet_peer_table	*ptable;
-	struct list_head		*hash;
-	int			i;
-	int			j;
+	struct lnet_peer_table *ptable;
+	struct list_head *hash;
+	int i;
+	int j;
 
 	if (the_lnet.ln_peer_tables == NULL)
 		return;
@@ -106,9 +106,9 @@ lnet_peer_tables_destroy(void)
 void
 lnet_peer_tables_cleanup(void)
 {
-	struct lnet_peer_table	*ptable;
-	int			i;
-	int			j;
+	struct lnet_peer_table *ptable;
+	int i;
+	int j;
 
 	LASSERT(the_lnet.ln_shutdown);	/* i.e. no new peers */
 
@@ -133,7 +133,7 @@ lnet_peer_tables_cleanup(void)
 
 	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
 		LIST_HEAD(deathrow);
-		lnet_peer_t	*lp;
+		lnet_peer_t *lp;
 
 		lnet_net_lock(i);
 
@@ -186,8 +186,8 @@ lnet_destroy_peer_locked(lnet_peer_t *lp)
 lnet_peer_t *
 lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
 {
-	struct list_head	*peers;
-	lnet_peer_t	*lp;
+	struct list_head *peers;
+	lnet_peer_t *lp;
 
 	LASSERT(!the_lnet.ln_shutdown);
 
@@ -205,11 +205,11 @@ lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
 int
 lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
 {
-	struct lnet_peer_table	*ptable;
-	lnet_peer_t		*lp = NULL;
-	lnet_peer_t		*lp2;
-	int			cpt2;
-	int			rc = 0;
+	struct lnet_peer_table *ptable;
+	lnet_peer_t *lp = NULL;
+	lnet_peer_t *lp2;
+	int cpt2;
+	int rc = 0;
 
 	*lpp = NULL;
 	if (the_lnet.ln_shutdown) /* it's shutting down */
@@ -287,8 +287,8 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
 		goto out;
 	}
 
-	lp->lp_txcredits    =
-	lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
+	lp->lp_txcredits     =
+	lp->lp_mintxcredits  = lp->lp_ni->ni_peertxcredits;
 	lp->lp_rtrcredits    =
 	lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
 
@@ -308,10 +308,10 @@ out:
 void
 lnet_debug_peer(lnet_nid_t nid)
 {
-	char		*aliveness = "NA";
-	lnet_peer_t	*lp;
-	int		rc;
-	int		cpt;
+	char *aliveness = "NA";
+	lnet_peer_t *lp;
+	int rc;
+	int cpt;
 
 	cpt = lnet_cpt_of_nid(nid);
 	lnet_net_lock(cpt);
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 8510bae..96886a2 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -139,8 +139,8 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
 static void
 lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 {
-	int	alive;
-	int	notifylnd;
+	int alive;
+	int notifylnd;
 
 	/* Notify only in 1 thread at any time to ensure ordered notification.
 	 * NB individual events can be missed; the only guarantee is that you
@@ -152,7 +152,7 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 	lp->lp_notifying = 1;
 
 	while (lp->lp_notify) {
-		alive     = lp->lp_alive;
+		alive = lp->lp_alive;
 		notifylnd = lp->lp_notifylnd;
 
 		lp->lp_notifylnd = 0;
@@ -228,9 +228,9 @@ lnet_rtr_decref_locked(lnet_peer_t *lp)
 lnet_remotenet_t *
 lnet_find_net_locked(__u32 net)
 {
-	lnet_remotenet_t	*rnet;
-	struct list_head		*tmp;
-	struct list_head		*rn_list;
+	lnet_remotenet_t *rnet;
+	struct list_head *tmp;
+	struct list_head *rn_list;
 
 	LASSERT(!the_lnet.ln_shutdown);
 
@@ -276,9 +276,9 @@ static void lnet_shuffle_seed(void)
 static void
 lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
 {
-	unsigned int      len = 0;
-	unsigned int      offset = 0;
-	struct list_head       *e;
+	unsigned int len = 0;
+	unsigned int offset = 0;
+	struct list_head *e;
 
 	lnet_shuffle_seed();
 
@@ -304,13 +304,13 @@ int
 lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
 	       unsigned int priority)
 {
-	struct list_head	  *e;
-	lnet_remotenet_t    *rnet;
-	lnet_remotenet_t    *rnet2;
-	lnet_route_t	*route;
-	lnet_ni_t	   *ni;
-	int		  add_route;
-	int		  rc;
+	struct list_head *e;
+	lnet_remotenet_t *rnet;
+	lnet_remotenet_t *rnet2;
+	lnet_route_t *route;
+	lnet_ni_t *ni;
+	int add_route;
+	int rc;
 
 	CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
 	       libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
@@ -416,14 +416,14 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
 int
 lnet_check_routes(void)
 {
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*route;
-	lnet_route_t		*route2;
-	struct list_head		*e1;
-	struct list_head		*e2;
-	int			cpt;
-	struct list_head		*rn_list;
-	int			i;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *route;
+	lnet_route_t *route2;
+	struct list_head *e1;
+	struct list_head *e2;
+	int cpt;
+	struct list_head *rn_list;
+	int i;
 
 	cpt = lnet_net_lock_current();
 
@@ -434,9 +434,9 @@ lnet_check_routes(void)
 
 			route2 = NULL;
 			list_for_each(e2, &rnet->lrn_routes) {
-				lnet_nid_t	nid1;
-				lnet_nid_t	nid2;
-				int		net;
+				lnet_nid_t nid1;
+				lnet_nid_t nid2;
+				int net;
 
 				route = list_entry(e2, lnet_route_t,
 						       lr_list);
@@ -472,14 +472,14 @@ lnet_check_routes(void)
 int
 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
 {
-	struct lnet_peer	*gateway;
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*route;
-	struct list_head		*e1;
-	struct list_head		*e2;
-	int			rc = -ENOENT;
-	struct list_head		*rn_list;
-	int			idx = 0;
+	struct lnet_peer *gateway;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *route;
+	struct list_head *e1;
+	struct list_head *e2;
+	int rc = -ENOENT;
+	struct list_head *rn_list;
+	int idx = 0;
 
 	CDEBUG(D_NET, "Del route: net %s : gw %s\n",
 	       libcfs_net2str(net), libcfs_nid2str(gw_nid));
@@ -554,13 +554,13 @@ int
 lnet_get_route(int idx, __u32 *net, __u32 *hops,
 	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
 {
-	struct list_head		*e1;
-	struct list_head		*e2;
-	lnet_remotenet_t	*rnet;
-	lnet_route_t		*route;
-	int			cpt;
-	int			i;
-	struct list_head		*rn_list;
+	struct list_head *e1;
+	struct list_head *e2;
+	lnet_remotenet_t *rnet;
+	lnet_route_t *route;
+	int cpt;
+	int i;
+	struct list_head *rn_list;
 
 	cpt = lnet_net_lock_current();
 
@@ -574,11 +574,11 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
 						       lr_list);
 
 				if (idx-- == 0) {
-					*net	  = rnet->lrn_net;
-					*hops	  = route->lr_hops;
+					*net      = rnet->lrn_net;
+					*hops     = route->lr_hops;
 					*priority = route->lr_priority;
 					*gateway  = route->lr_gateway->lp_nid;
-					*alive	  = route->lr_gateway->lp_alive;
+					*alive    = route->lr_gateway->lp_alive;
 					lnet_net_unlock(cpt);
 					return 0;
 				}
@@ -593,7 +593,7 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
 void
 lnet_swap_pinginfo(lnet_ping_info_t *info)
 {
-	int	       i;
+	int i;
 	lnet_ni_status_t *stat;
 
 	__swab32s(&info->pi_magic);
@@ -614,9 +614,9 @@ lnet_swap_pinginfo(lnet_ping_info_t *info)
 static void
 lnet_parse_rc_info(lnet_rc_data_t *rcd)
 {
-	lnet_ping_info_t	*info = rcd->rcd_pinginfo;
-	struct lnet_peer	*gw   = rcd->rcd_gateway;
-	lnet_route_t		*rtr;
+	lnet_ping_info_t *info = rcd->rcd_pinginfo;
+	struct lnet_peer *gw = rcd->rcd_gateway;
+	lnet_route_t *rtr;
 
 	if (!gw->lp_alive)
 		return;
@@ -643,14 +643,14 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
 		return; /* can't carry NI status info */
 
 	list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
-		int	ptl_status = LNET_NI_STATUS_INVALID;
-		int	down = 0;
-		int	up = 0;
-		int	i;
+		int ptl_status = LNET_NI_STATUS_INVALID;
+		int down = 0;
+		int up = 0;
+		int i;
 
 		for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
 			lnet_ni_status_t *stat = &info->pi_ni[i];
-			lnet_nid_t	 nid = stat->ns_nid;
+			lnet_nid_t nid = stat->ns_nid;
 
 			if (nid == LNET_NID_ANY) {
 				CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
@@ -699,8 +699,8 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
 static void
 lnet_router_checker_event(lnet_event_t *event)
 {
-	lnet_rc_data_t		*rcd = event->md.user_ptr;
-	struct lnet_peer	*lp;
+	lnet_rc_data_t *rcd = event->md.user_ptr;
+	struct lnet_peer *lp;
 
 	LASSERT(rcd != NULL);
 
@@ -752,14 +752,14 @@ lnet_router_checker_event(lnet_event_t *event)
 static void
 lnet_wait_known_routerstate(void)
 {
-	lnet_peer_t	 *rtr;
-	struct list_head	  *entry;
-	int		  all_known;
+	lnet_peer_t *rtr;
+	struct list_head *entry;
+	int all_known;
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 
 	for (;;) {
-		int	cpt = lnet_net_lock_current();
+		int cpt = lnet_net_lock_current();
 
 		all_known = 1;
 		list_for_each(entry, &the_lnet.ln_routers) {
@@ -799,9 +799,9 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
 static void
 lnet_update_ni_status_locked(void)
 {
-	lnet_ni_t	*ni;
-	long		now;
-	int		timeout;
+	lnet_ni_t *ni;
+	long now;
+	int timeout;
 
 	LASSERT(the_lnet.ln_routing);
 
@@ -860,10 +860,10 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd)
 static lnet_rc_data_t *
 lnet_create_rc_data_locked(lnet_peer_t *gateway)
 {
-	lnet_rc_data_t		*rcd = NULL;
-	lnet_ping_info_t	*pi;
-	int			rc;
-	int			i;
+	lnet_rc_data_t *rcd = NULL;
+	lnet_ping_info_t *pi;
+	int rc;
+	int i;
 
 	lnet_net_unlock(gateway->lp_cpt);
 
@@ -943,8 +943,8 @@ static void
 lnet_ping_router_locked(lnet_peer_t *rtr)
 {
 	lnet_rc_data_t *rcd = NULL;
-	unsigned long      now = cfs_time_current();
-	int	     secs;
+	unsigned long now = cfs_time_current();
+	int secs;
 
 	lnet_peer_addref_locked(rtr);
 
@@ -979,9 +979,9 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
 	if (secs != 0 && !rtr->lp_ping_notsent &&
 	    cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
 					     cfs_time_seconds(secs)))) {
-		int	       rc;
+		int rc;
 		lnet_process_id_t id;
-		lnet_handle_md_t  mdh;
+		lnet_handle_md_t mdh;
 
 		id.nid = rtr->lp_nid;
 		id.pid = LUSTRE_SRV_LNET_PID;
@@ -1013,8 +1013,8 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
 int
 lnet_router_checker_start(void)
 {
-	int	  rc;
-	int	  eqsz;
+	int rc;
+	int eqsz;
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
 
@@ -1085,11 +1085,11 @@ lnet_router_checker_stop(void)
 static void
 lnet_prune_rc_data(int wait_unlink)
 {
-	lnet_rc_data_t		*rcd;
-	lnet_rc_data_t		*tmp;
-	lnet_peer_t		*lp;
-	struct list_head		head;
-	int			i = 2;
+	lnet_rc_data_t *rcd;
+	lnet_rc_data_t *tmp;
+	lnet_peer_t *lp;
+	struct list_head head;
+	int i = 2;
 
 	if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
 		   list_empty(&the_lnet.ln_rcd_deathrow) &&
@@ -1169,17 +1169,17 @@ lnet_prune_rc_data(int wait_unlink)
 static int
 lnet_router_checker(void *arg)
 {
-	lnet_peer_t       *rtr;
-	struct list_head	*entry;
+	lnet_peer_t *rtr;
+	struct list_head *entry;
 
 	cfs_block_allsigs();
 
 	LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 
 	while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
-		__u64	version;
-		int	cpt;
-		int	cpt2;
+		__u64 version;
+		int cpt;
+		int cpt2;
 
 		cpt = lnet_net_lock_current();
 rescan:
@@ -1245,11 +1245,11 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
 static lnet_rtrbuf_t *
 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 {
-	int	    npages = rbp->rbp_npages;
-	int	    sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
-	struct page   *page;
+	int npages = rbp->rbp_npages;
+	int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
+	struct page *page;
 	lnet_rtrbuf_t *rb;
-	int	    i;
+	int i;
 
 	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
 	if (rb == NULL)
@@ -1280,9 +1280,9 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 static void
 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
 {
-	int		npages = rbp->rbp_npages;
-	int		nbuffers = 0;
-	lnet_rtrbuf_t	*rb;
+	int npages = rbp->rbp_npages;
+	int nbuffers = 0;
+	lnet_rtrbuf_t *rb;
 
 	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
 		return;
@@ -1310,7 +1310,7 @@ static int
 lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
 {
 	lnet_rtrbuf_t *rb;
-	int	    i;
+	int i;
 
 	if (rbp->rbp_nbuffers != 0) {
 		LASSERT(rbp->rbp_nbuffers == nbufs);
@@ -1355,7 +1355,7 @@ void
 lnet_rtrpools_free(void)
 {
 	lnet_rtrbufpool_t *rtrp;
-	int		  i;
+	int i;
 
 	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
 		return;
@@ -1373,7 +1373,7 @@ lnet_rtrpools_free(void)
 static int
 lnet_nrb_tiny_calculate(int npages)
 {
-	int	nrbs = LNET_NRB_TINY;
+	int nrbs = LNET_NRB_TINY;
 
 	if (tiny_router_buffers < 0) {
 		LCONSOLE_ERROR_MSG(0x10c,
@@ -1392,7 +1392,7 @@ lnet_nrb_tiny_calculate(int npages)
 static int
 lnet_nrb_small_calculate(int npages)
 {
-	int	nrbs = LNET_NRB_SMALL;
+	int nrbs = LNET_NRB_SMALL;
 
 	if (small_router_buffers < 0) {
 		LCONSOLE_ERROR_MSG(0x10c,
@@ -1411,7 +1411,7 @@ lnet_nrb_small_calculate(int npages)
 static int
 lnet_nrb_large_calculate(int npages)
 {
-	int	nrbs = LNET_NRB_LARGE;
+	int nrbs = LNET_NRB_LARGE;
 
 	if (large_router_buffers < 0) {
 		LCONSOLE_ERROR_MSG(0x10c,
@@ -1431,13 +1431,13 @@ int
 lnet_rtrpools_alloc(int im_a_router)
 {
 	lnet_rtrbufpool_t *rtrp;
-	int	large_pages;
-	int	small_pages = 1;
-	int	nrb_tiny;
-	int	nrb_small;
-	int	nrb_large;
-	int	rc;
-	int	i;
+	int large_pages;
+	int small_pages = 1;
+	int nrb_tiny;
+	int nrb_small;
+	int nrb_large;
+	int rc;
+	int i;
 
 	large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
@@ -1507,9 +1507,9 @@ lnet_rtrpools_alloc(int im_a_router)
 int
 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
 {
-	struct lnet_peer	*lp = NULL;
-	unsigned long		now = cfs_time_current();
-	int			cpt = lnet_cpt_of_nid(nid);
+	struct lnet_peer *lp = NULL;
+	unsigned long now = cfs_time_current();
+	int cpt = lnet_cpt_of_nid(nid);
 
 	LASSERT(!in_interrupt ());
 
@@ -1591,13 +1591,13 @@ void
 lnet_router_checker(void)
 {
 	static time_t last;
-	static int    running;
+	static int running;
 
-	time_t	    now = get_seconds();
-	int	       interval = now - last;
-	int	       rc;
-	__u64	     version;
-	lnet_peer_t      *rtr;
+	time_t now = get_seconds();
+	int interval = now - last;
+	int rc;
+	__u64 version;
+	lnet_peer_t *rtr;
 
 	/* It's no use to call me again within a sec - all intervals and
 	 * timeouts are measured in seconds */
@@ -1625,7 +1625,7 @@ lnet_router_checker(void)
 
 	/* consume all pending events */
 	while (1) {
-		int	  i;
+		int i;
 		lnet_event_t ev;
 
 		/* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index c055afc..ee902dc 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -112,11 +112,11 @@ static int proc_call_handler(void *data, int write, loff_t *ppos,
 static int __proc_lnet_stats(void *data, int write,
 			     loff_t pos, void __user *buffer, int nob)
 {
-	int	      rc;
+	int rc;
 	lnet_counters_t *ctrs;
-	int	      len;
-	char	    *tmpstr;
-	const int	tmpsiz = 256; /* 7 %u and 4 %llu */
+	int len;
+	char *tmpstr;
+	const int tmpsiz = 256; /* 7 %u and 4 %llu */
 
 	if (write) {
 		lnet_counters_reset();
@@ -167,13 +167,13 @@ static int proc_lnet_stats(struct ctl_table *table, int write,
 static int proc_lnet_routes(struct ctl_table *table, int write,
 			    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	const int	tmpsiz = 256;
-	char		*tmpstr;
-	char		*s;
-	int		rc = 0;
-	int		len;
-	int		ver;
-	int		off;
+	const int tmpsiz = 256;
+	char *tmpstr;
+	char *s;
+	int rc = 0;
+	int len;
+	int ver;
+	int off;
 
 	CLASSERT(sizeof(loff_t) >= 4);
 
@@ -205,13 +205,13 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
 		lnet_net_unlock(0);
 		*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
 	} else {
-		struct list_head		*n;
-		struct list_head		*r;
-		lnet_route_t		*route = NULL;
-		lnet_remotenet_t	*rnet  = NULL;
-		int			skip  = off - 1;
-		struct list_head		*rn_list;
-		int			i;
+		struct list_head *n;
+		struct list_head *r;
+		lnet_route_t *route = NULL;
+		lnet_remotenet_t *rnet  = NULL;
+		int skip  = off - 1;
+		struct list_head *rn_list;
+		int i;
 
 		lnet_net_lock(0);
 
@@ -251,11 +251,11 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
 		}
 
 		if (route != NULL) {
-			__u32        net	= rnet->lrn_net;
-			unsigned int hops	= route->lr_hops;
-			unsigned int priority	= route->lr_priority;
-			lnet_nid_t   nid	= route->lr_gateway->lp_nid;
-			int          alive	= route->lr_gateway->lp_alive;
+			__u32 net = rnet->lrn_net;
+			unsigned int hops = route->lr_hops;
+			unsigned int priority = route->lr_priority;
+			lnet_nid_t nid = route->lr_gateway->lp_nid;
+			int alive = route->lr_gateway->lp_alive;
 
 			s += snprintf(s, tmpstr + tmpsiz - s,
 				      "%-8s %4u %8u %7s %s\n",
@@ -293,13 +293,13 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
 static int proc_lnet_routers(struct ctl_table *table, int write,
 			     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int	rc = 0;
-	char      *tmpstr;
-	char      *s;
-	const int  tmpsiz = 256;
-	int	len;
-	int	ver;
-	int	off;
+	int rc = 0;
+	char *tmpstr;
+	char *s;
+	const int tmpsiz = 256;
+	int len;
+	int ver;
+	int off;
 
 	off = LNET_PROC_HOFF_GET(*ppos);
 	ver = LNET_PROC_VER_GET(*ppos);
@@ -328,9 +328,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 		lnet_net_unlock(0);
 		*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
 	} else {
-		struct list_head		*r;
-		struct lnet_peer	*peer = NULL;
-		int			skip = off - 1;
+		struct list_head *r;
+		struct lnet_peer *peer = NULL;
+		int skip = off - 1;
 
 		lnet_net_lock(0);
 
@@ -360,14 +360,14 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 			lnet_nid_t nid = peer->lp_nid;
 			unsigned long now = cfs_time_current();
 			unsigned long deadline = peer->lp_ping_deadline;
-			int nrefs     = peer->lp_refcount;
-			int nrtrrefs  = peer->lp_rtr_refcount;
+			int nrefs = peer->lp_refcount;
+			int nrtrrefs = peer->lp_rtr_refcount;
 			int alive_cnt = peer->lp_alive_count;
-			int alive     = peer->lp_alive;
-			int pingsent  = !peer->lp_ping_notsent;
+			int alive = peer->lp_alive;
+			int pingsent = !peer->lp_ping_notsent;
 			int last_ping = cfs_duration_sec(cfs_time_sub(now,
 						     peer->lp_ping_timestamp));
-			int down_ni   = 0;
+			int down_ni = 0;
 			lnet_route_t *rtr;
 
 			if ((peer->lp_ping_feats &
@@ -428,16 +428,16 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 static int proc_lnet_peers(struct ctl_table *table, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	const int		tmpsiz  = 256;
-	struct lnet_peer_table	*ptable;
-	char			*tmpstr;
-	char			*s;
-	int			cpt  = LNET_PROC_CPT_GET(*ppos);
-	int			ver  = LNET_PROC_VER_GET(*ppos);
-	int			hash = LNET_PROC_HASH_GET(*ppos);
-	int			hoff = LNET_PROC_HOFF_GET(*ppos);
-	int			rc = 0;
-	int			len;
+	const int tmpsiz  = 256;
+	struct lnet_peer_table *ptable;
+	char *tmpstr;
+	char *s;
+	int cpt  = LNET_PROC_CPT_GET(*ppos);
+	int ver  = LNET_PROC_VER_GET(*ppos);
+	int hash = LNET_PROC_HASH_GET(*ppos);
+	int hoff = LNET_PROC_HOFF_GET(*ppos);
+	int rc = 0;
+	int len;
 
 	CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
 	LASSERT(!write);
@@ -465,9 +465,9 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
 
 		hoff++;
 	} else {
-		struct lnet_peer	*peer;
-		struct list_head		*p;
-		int			skip;
+		struct lnet_peer *peer;
+		struct list_head *p;
+		int skip;
  again:
 		p = NULL;
 		peer = NULL;
@@ -521,23 +521,23 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
 		}
 
 		if (peer != NULL) {
-			lnet_nid_t nid       = peer->lp_nid;
-			int	nrefs     = peer->lp_refcount;
-			int	lastalive = -1;
-			char      *aliveness = "NA";
-			int	maxcr     = peer->lp_ni->ni_peertxcredits;
-			int	txcr      = peer->lp_txcredits;
-			int	mintxcr   = peer->lp_mintxcredits;
-			int	rtrcr     = peer->lp_rtrcredits;
-			int	minrtrcr  = peer->lp_minrtrcredits;
-			int	txqnob    = peer->lp_txqnob;
+			lnet_nid_t nid = peer->lp_nid;
+			int nrefs = peer->lp_refcount;
+			int lastalive = -1;
+			char *aliveness = "NA";
+			int maxcr = peer->lp_ni->ni_peertxcredits;
+			int txcr = peer->lp_txcredits;
+			int mintxcr = peer->lp_mintxcredits;
+			int rtrcr = peer->lp_rtrcredits;
+			int minrtrcr = peer->lp_minrtrcredits;
+			int txqnob = peer->lp_txqnob;
 
 			if (lnet_isrouter(peer) ||
 			    lnet_peer_aliveness_enabled(peer))
 				aliveness = peer->lp_alive ? "up" : "down";
 
 			if (lnet_peer_aliveness_enabled(peer)) {
-				unsigned long     now = cfs_time_current();
+				unsigned long now = cfs_time_current();
 				long delta;
 
 				delta = cfs_time_sub(now, peer->lp_last_alive);
@@ -595,13 +595,13 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
 static int __proc_lnet_buffers(void *data, int write,
 			       loff_t pos, void __user *buffer, int nob)
 {
-	char	    *s;
-	char	    *tmpstr;
-	int		tmpsiz;
-	int		idx;
-	int		len;
-	int		rc;
-	int		i;
+	char *s;
+	char *tmpstr;
+	int tmpsiz;
+	int idx;
+	int len;
+	int rc;
+	int i;
 
 	LASSERT(!write);
 
@@ -660,11 +660,11 @@ static int proc_lnet_buffers(struct ctl_table *table, int write,
 static int proc_lnet_nis(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int	tmpsiz = 128 * LNET_CPT_NUMBER;
-	int	rc = 0;
-	char      *tmpstr;
-	char      *s;
-	int	len;
+	int tmpsiz = 128 * LNET_CPT_NUMBER;
+	int rc = 0;
+	char *tmpstr;
+	char *s;
+	int len;
 
 	LASSERT(!write);
 
@@ -684,9 +684,9 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
 			      "rtr", "max", "tx", "min");
 		LASSERT(tmpstr + tmpsiz - s > 0);
 	} else {
-		struct list_head	*n;
-		lnet_ni_t	 *ni   = NULL;
-		int		skip = *ppos - 1;
+		struct list_head *n;
+		lnet_ni_t *ni   = NULL;
+		int skip = *ppos - 1;
 
 		lnet_net_lock(0);
 
@@ -705,12 +705,12 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
 		}
 
 		if (ni != NULL) {
-			struct lnet_tx_queue	*tq;
-			char	*stat;
-			long	now = get_seconds();
-			int	last_alive = -1;
-			int	i;
-			int	j;
+			struct lnet_tx_queue *tq;
+			char *stat;
+			long now = get_seconds();
+			int last_alive = -1;
+			int i;
+			int j;
 
 			if (the_lnet.ln_routing)
 				last_alive = now - ni->ni_last_alive;
@@ -777,9 +777,9 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
 }
 
 struct lnet_portal_rotors {
-	int	     pr_value;
-	const char      *pr_name;
-	const char	*pr_desc;
+	int pr_value;
+	const char *pr_name;
+	const char *pr_desc;
 };
 
 static struct lnet_portal_rotors	portal_rotors[] = {
@@ -815,11 +815,11 @@ extern int portal_rotor;
 static int __proc_lnet_portal_rotor(void *data, int write,
 				    loff_t pos, void __user *buffer, int nob)
 {
-	const int	buf_len	= 128;
-	char		*buf;
-	char		*tmp;
-	int		rc;
-	int		i;
+	const int buf_len = 128;
+	char *buf;
+	char *tmp;
+	int rc;
+	int i;
 
 	LIBCFS_ALLOC(buf, buf_len);
 	if (buf == NULL)
@@ -887,38 +887,38 @@ static struct ctl_table lnet_table[] = {
 	 * to go via /proc for portability.
 	 */
 	{
-		.procname = "stats",
-		.mode     = 0644,
+		.procname     = "stats",
+		.mode         = 0644,
 		.proc_handler = &proc_lnet_stats,
 	},
 	{
-		.procname = "routes",
-		.mode     = 0444,
+		.procname     = "routes",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_routes,
 	},
 	{
-		.procname = "routers",
-		.mode     = 0444,
+		.procname     = "routers",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_routers,
 	},
 	{
-		.procname = "peers",
-		.mode     = 0444,
+		.procname     = "peers",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_peers,
 	},
 	{
-		.procname = "buffers",
-		.mode     = 0444,
+		.procname     = "buffers",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_buffers,
 	},
 	{
-		.procname = "nis",
-		.mode     = 0444,
+		.procname     = "nis",
+		.mode         = 0444,
 		.proc_handler = &proc_lnet_nis,
 	},
 	{
-		.procname = "portal_rotor",
-		.mode     = 0644,
+		.procname     = "portal_rotor",
+		.mode         = 0644,
 		.proc_handler = &proc_lnet_portal_rotor,
 	},
 	{
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (11 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync Mike Shuey
                   ` (9 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

lnet_fini_locks() does nothing.  Remove.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/api-ni.c |    8 --------
 1 files changed, 0 insertions(+), 8 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 6910f56..1adc481 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -98,12 +98,6 @@ lnet_init_locks(void)
 	mutex_init(&the_lnet.ln_api_mutex);
 }
 
-static void
-lnet_fini_locks(void)
-{
-}
-
-
 static int
 lnet_create_remote_nets_table(void)
 {
@@ -153,8 +147,6 @@ lnet_destroy_locks(void)
 		cfs_percpt_lock_free(the_lnet.ln_net_lock);
 		the_lnet.ln_net_lock = NULL;
 	}
-
-	lnet_fini_locks();
 }
 
 static int
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (12 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals Mike Shuey
                   ` (8 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

LNetSetAsync() returns 0, and is never called.  Doesn't exist in the Intel
tree, either.  Remove it.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/include/linux/lnet/api.h |    1 -
 drivers/staging/lustre/lnet/lnet/lib-move.c     |   24 -----------------------
 2 files changed, 0 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index cd86517..c4dc1b2 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -209,7 +209,6 @@ int LNetGet(lnet_nid_t	self,
 int LNetSetLazyPortal(int portal);
 int LNetClearLazyPortal(int portal);
 int LNetCtl(unsigned int cmd, void *arg);
-int LNetSetAsync(lnet_process_id_t id, int nasync);
 
 /** @} lnet_misc */
 
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index ba59fe7..433faae 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -2434,27 +2434,3 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
 	return -EHOSTUNREACH;
 }
 EXPORT_SYMBOL(LNetDist);
-
-/**
- * Set the number of asynchronous messages expected from a target process.
- *
- * This function is only meaningful for userspace callers. It's a no-op when
- * called from kernel.
- *
- * Asynchronous messages are those that can come from a target when the
- * userspace process is not waiting for IO to complete; e.g., AST callbacks
- * from Lustre servers. Specifying the expected number of such messages
- * allows them to be eagerly received when user process is not running in
- * LNet; otherwise network errors may occur.
- *
- * \param id Process ID of the target process.
- * \param nasync Number of asynchronous messages expected from the target.
- *
- * \return 0 on success, and an error code otherwise.
- */
-int
-LNetSetAsync(lnet_process_id_t id, int nasync)
-{
-	return 0;
-}
-EXPORT_SYMBOL(LNetSetAsync);
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (13 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace Mike Shuey
                   ` (7 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-devel, Mike Shuey

Fix the module version to match upstream development.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/module.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index ff3f831..f73d644 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -147,7 +147,7 @@ fini_lnet(void)
 }
 
 MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
-MODULE_DESCRIPTION("Portals v3.1");
+MODULE_DESCRIPTION("LNet v3.1");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0.0");
 
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (14 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro Mike Shuey
                   ` (6 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Unify variable declarations to use a single whitespace.  Also line up
declarations and comments in o2iblnd.h.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c    |  458 +++++++-------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h    |  680 ++++++++++----------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c |  484 +++++++-------
 .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c  |   48 +-
 4 files changed, 841 insertions(+), 829 deletions(-)

diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 3bad441..a57c5c3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -42,21 +42,21 @@
 #include <asm/div64.h>
 
 static lnd_t the_o2iblnd = {
-	.lnd_type       = O2IBLND,
-	.lnd_startup    = kiblnd_startup,
-	.lnd_shutdown   = kiblnd_shutdown,
-	.lnd_ctl	= kiblnd_ctl,
-	.lnd_query      = kiblnd_query,
-	.lnd_send       = kiblnd_send,
-	.lnd_recv       = kiblnd_recv,
+	.lnd_type     = O2IBLND,
+	.lnd_startup  = kiblnd_startup,
+	.lnd_shutdown = kiblnd_shutdown,
+	.lnd_ctl      = kiblnd_ctl,
+	.lnd_query    = kiblnd_query,
+	.lnd_send     = kiblnd_send,
+	.lnd_recv     = kiblnd_recv,
 };
 
-kib_data_t	      kiblnd_data;
+kib_data_t kiblnd_data;
 
 static __u32 kiblnd_cksum(void *ptr, int nob)
 {
-	char  *c  = ptr;
-	__u32  sum = 0;
+	char *c = ptr;
+	__u32 sum = 0;
 
 	while (nob-- > 0)
 		sum = ((sum << 1) | (sum >> 31)) + *c++;
@@ -138,10 +138,10 @@ static int kiblnd_msgtype2size(int type)
 
 static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
 {
-	kib_rdma_desc_t   *rd;
-	int		nob;
-	int		n;
-	int		i;
+	kib_rdma_desc_t *rd;
+	int nob;
+	int n;
+	int i;
 
 	LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
 		 msg->ibm_type == IBLND_MSG_PUT_ACK);
@@ -210,10 +210,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
 int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
 {
 	const int hdr_size = offsetof(kib_msg_t, ibm_u);
-	__u32     msg_cksum;
-	__u16     version;
-	int       msg_nob;
-	int       flip;
+	__u32 msg_cksum;
+	__u16 version;
+	int msg_nob;
+	int flip;
 
 	/* 6 bytes are enough to have received magic + version */
 	if (nob < 6) {
@@ -320,10 +320,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
 
 int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
 {
-	kib_peer_t	*peer;
-	kib_net_t	*net = ni->ni_data;
-	int		cpt = lnet_cpt_of_nid(nid);
-	unsigned long   flags;
+	kib_peer_t *peer;
+	kib_net_t *net = ni->ni_data;
+	int cpt = lnet_cpt_of_nid(nid);
+	unsigned long flags;
 
 	LASSERT(net != NULL);
 	LASSERT(nid != LNET_NID_ANY);
@@ -385,9 +385,9 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
 {
 	/* the caller is responsible for accounting the additional reference
 	 * that this creates */
-	struct list_head       *peer_list = kiblnd_nid2peerlist(nid);
-	struct list_head       *tmp;
-	kib_peer_t       *peer;
+	struct list_head *peer_list = kiblnd_nid2peerlist(nid);
+	struct list_head *tmp;
+	kib_peer_t *peer;
 
 	list_for_each(tmp, peer_list) {
 
@@ -422,10 +422,10 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer)
 static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
 				lnet_nid_t *nidp, int *count)
 {
-	kib_peer_t	    *peer;
-	struct list_head	    *ptmp;
-	int		    i;
-	unsigned long	  flags;
+	kib_peer_t *peer;
+	struct list_head *ptmp;
+	int i;
+	unsigned long flags;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -459,9 +459,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
 
 static void kiblnd_del_peer_locked(kib_peer_t *peer)
 {
-	struct list_head	   *ctmp;
-	struct list_head	   *cnxt;
-	kib_conn_t	   *conn;
+	struct list_head *ctmp;
+	struct list_head *cnxt;
+	kib_conn_t *conn;
 
 	if (list_empty(&peer->ibp_conns)) {
 		kiblnd_unlink_peer_locked(peer);
@@ -480,14 +480,14 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer)
 static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
 {
 	LIST_HEAD(zombies);
-	struct list_head	    *ptmp;
-	struct list_head	    *pnxt;
-	kib_peer_t	    *peer;
-	int		    lo;
-	int		    hi;
-	int		    i;
-	unsigned long	  flags;
-	int		    rc = -ENOENT;
+	struct list_head *ptmp;
+	struct list_head *pnxt;
+	kib_peer_t *peer;
+	int lo;
+	int hi;
+	int i;
+	unsigned long flags;
+	int rc = -ENOENT;
 
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -532,12 +532,12 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
 
 static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
 {
-	kib_peer_t	    *peer;
-	struct list_head	    *ptmp;
-	kib_conn_t	    *conn;
-	struct list_head	    *ctmp;
-	int		    i;
-	unsigned long	  flags;
+	kib_peer_t *peer;
+	struct list_head *ptmp;
+	kib_conn_t *conn;
+	struct list_head *ctmp;
+	int i;
+	unsigned long flags;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -593,7 +593,7 @@ int kiblnd_translate_mtu(int value)
 
 static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
 {
-	int	   mtu;
+	int mtu;
 
 	/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
 	if (cmid->route.path_rec == NULL)
@@ -607,11 +607,11 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
 
 static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
 {
-	cpumask_t	*mask;
-	int		vectors;
-	int		off;
-	int		i;
-	lnet_nid_t	nid = conn->ibc_peer->ibp_nid;
+	cpumask_t *mask;
+	int vectors;
+	int off;
+	int i;
+	lnet_nid_t nid = conn->ibc_peer->ibp_nid;
 
 	vectors = conn->ibc_cmid->device->num_comp_vectors;
 	if (vectors <= 1)
@@ -642,17 +642,17 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 	 * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
 	 * to destroy 'cmid' here since I'm called from the CM which still has
 	 * its ref on 'cmid'). */
-	rwlock_t		*glock = &kiblnd_data.kib_global_lock;
-	kib_net_t	      *net = peer->ibp_ni->ni_data;
-	kib_dev_t	      *dev;
+	rwlock_t *glock = &kiblnd_data.kib_global_lock;
+	kib_net_t *net = peer->ibp_ni->ni_data;
+	kib_dev_t *dev;
 	struct ib_qp_init_attr *init_qp_attr;
-	struct kib_sched_info	*sched;
-	kib_conn_t		*conn;
-	struct ib_cq		*cq;
-	unsigned long		flags;
-	int			cpt;
-	int			rc;
-	int			i;
+	struct kib_sched_info *sched;
+	kib_conn_t *conn;
+	struct ib_cq *cq;
+	unsigned long flags;
+	int cpt;
+	int rc;
+	int i;
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
@@ -837,8 +837,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 void kiblnd_destroy_conn(kib_conn_t *conn)
 {
 	struct rdma_cm_id *cmid = conn->ibc_cmid;
-	kib_peer_t	*peer = conn->ibc_peer;
-	int		rc;
+	kib_peer_t *peer = conn->ibc_peer;
+	int rc;
 
 	LASSERT(!in_interrupt());
 	LASSERT(atomic_read(&conn->ibc_refcount) == 0);
@@ -904,10 +904,10 @@ void kiblnd_destroy_conn(kib_conn_t *conn)
 
 int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
 {
-	kib_conn_t	     *conn;
-	struct list_head	     *ctmp;
-	struct list_head	     *cnxt;
-	int		     count = 0;
+	kib_conn_t *conn;
+	struct list_head *ctmp;
+	struct list_head *cnxt;
+	int count = 0;
 
 	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
 		conn = list_entry(ctmp, kib_conn_t, ibc_list);
@@ -926,10 +926,10 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
 int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
 				     int version, __u64 incarnation)
 {
-	kib_conn_t	     *conn;
-	struct list_head	     *ctmp;
-	struct list_head	     *cnxt;
-	int		     count = 0;
+	kib_conn_t *conn;
+	struct list_head *ctmp;
+	struct list_head *cnxt;
+	int count = 0;
 
 	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
 		conn = list_entry(ctmp, kib_conn_t, ibc_list);
@@ -953,14 +953,14 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
 
 static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
 {
-	kib_peer_t	     *peer;
-	struct list_head	     *ptmp;
-	struct list_head	     *pnxt;
-	int		     lo;
-	int		     hi;
-	int		     i;
-	unsigned long	   flags;
-	int		     count = 0;
+	kib_peer_t *peer;
+	struct list_head *ptmp;
+	struct list_head *pnxt;
+	int lo;
+	int hi;
+	int i;
+	unsigned long flags;
+	int count = 0;
 
 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
@@ -1001,17 +1001,17 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
 int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 {
 	struct libcfs_ioctl_data *data = arg;
-	int		       rc = -EINVAL;
+	int rc = -EINVAL;
 
 	switch (cmd) {
 	case IOC_LIBCFS_GET_PEER: {
-		lnet_nid_t   nid = 0;
-		int	  count = 0;
+		lnet_nid_t nid = 0;
+		int count = 0;
 
 		rc = kiblnd_get_peer_info(ni, data->ioc_count,
 					  &nid, &count);
-		data->ioc_nid    = nid;
-		data->ioc_count  = count;
+		data->ioc_nid   = nid;
+		data->ioc_count = count;
 		break;
 	}
 
@@ -1053,11 +1053,11 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 
 void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 {
-	unsigned long	last_alive = 0;
-	unsigned long	now = cfs_time_current();
-	rwlock_t	*glock = &kiblnd_data.kib_global_lock;
-	kib_peer_t	*peer;
-	unsigned long	flags;
+	unsigned long last_alive = 0;
+	unsigned long now = cfs_time_current();
+	rwlock_t *glock = &kiblnd_data.kib_global_lock;
+	kib_peer_t *peer;
+	unsigned long flags;
 
 	read_lock_irqsave(glock, flags);
 
@@ -1086,8 +1086,8 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 
 void kiblnd_free_pages(kib_pages_t *p)
 {
-	int	npages = p->ibp_npages;
-	int	i;
+	int npages = p->ibp_npages;
+	int i;
 
 	for (i = 0; i < npages; i++) {
 		if (p->ibp_pages[i] != NULL)
@@ -1099,8 +1099,8 @@ void kiblnd_free_pages(kib_pages_t *p)
 
 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
 {
-	kib_pages_t	*p;
-	int		i;
+	kib_pages_t *p;
+	int i;
 
 	LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
 			 offsetof(kib_pages_t, ibp_pages[npages]));
@@ -1130,7 +1130,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
 void kiblnd_unmap_rx_descs(kib_conn_t *conn)
 {
 	kib_rx_t *rx;
-	int       i;
+	int i;
 
 	LASSERT(conn->ibc_rxs != NULL);
 	LASSERT(conn->ibc_hdev != NULL);
@@ -1153,14 +1153,13 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn)
 
 void kiblnd_map_rx_descs(kib_conn_t *conn)
 {
-	kib_rx_t       *rx;
-	struct page    *pg;
-	int	     pg_off;
-	int	     ipg;
-	int	     i;
+	kib_rx_t *rx;
+	struct page *pg;
+	int pg_off;
+	int ipg;
+	int i;
 
-	for (pg_off = ipg = i = 0;
-	     i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+	for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
 		pg = conn->ibc_rx_pages->ibp_pages[ipg];
 		rx = &conn->ibc_rxs[i];
 
@@ -1192,9 +1191,9 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
 
 static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
 {
-	kib_hca_dev_t  *hdev = tpo->tpo_hdev;
-	kib_tx_t       *tx;
-	int	     i;
+	kib_hca_dev_t *hdev = tpo->tpo_hdev;
+	kib_tx_t *tx;
+	int i;
 
 	LASSERT(tpo->tpo_pool.po_allocated == 0);
 
@@ -1216,8 +1215,8 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
 static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
 {
 	kib_hca_dev_t *hdev;
-	unsigned long  flags;
-	int	    i = 0;
+	unsigned long flags;
+	int i = 0;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 	while (dev->ibd_failover) {
@@ -1240,15 +1239,15 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
 
 static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
 {
-	kib_pages_t    *txpgs = tpo->tpo_tx_pages;
-	kib_pool_t     *pool  = &tpo->tpo_pool;
-	kib_net_t      *net   = pool->po_owner->ps_net;
-	kib_dev_t      *dev;
-	struct page    *page;
-	kib_tx_t       *tx;
-	int	     page_offset;
-	int	     ipage;
-	int	     i;
+	kib_pages_t *txpgs = tpo->tpo_tx_pages;
+	kib_pool_t *pool = &tpo->tpo_pool;
+	kib_net_t *net = pool->po_owner->ps_net;
+	kib_dev_t *dev;
+	struct page *page;
+	kib_tx_t *tx;
+	int page_offset;
+	int ipage;
+	int i;
 
 	LASSERT(net != NULL);
 
@@ -1291,7 +1290,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
 
 struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
 {
-	__u64   index;
+	__u64 index;
 
 	LASSERT(hdev->ibh_mrs[0] != NULL);
 
@@ -1311,7 +1310,7 @@ struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
 {
 	struct ib_mr *prev_mr;
 	struct ib_mr *mr;
-	int	   i;
+	int i;
 
 	LASSERT(hdev->ibh_mrs[0] != NULL);
 
@@ -1382,18 +1381,18 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
 				  kib_fmr_pool_t **pp_fpo)
 {
 	/* FMR pool for RDMA */
-	kib_dev_t	       *dev = fps->fps_net->ibn_dev;
-	kib_fmr_pool_t	  *fpo;
+	kib_dev_t *dev = fps->fps_net->ibn_dev;
+	kib_fmr_pool_t *fpo;
 	struct ib_fmr_pool_param param = {
 		.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
-		.page_shift	= PAGE_SHIFT,
-		.access	    = (IB_ACCESS_LOCAL_WRITE |
-				      IB_ACCESS_REMOTE_WRITE),
-		.pool_size	   = fps->fps_pool_size,
+		.page_shift        = PAGE_SHIFT,
+		.access            = (IB_ACCESS_LOCAL_WRITE |
+		                      IB_ACCESS_REMOTE_WRITE),
+		.pool_size         = fps->fps_pool_size,
 		.dirty_watermark   = fps->fps_flush_trigger,
 		.flush_function    = NULL,
-		.flush_arg	 = NULL,
-		.cache	     = !!*kiblnd_tunables.kib_fmr_cache};
+		.flush_arg         = NULL,
+		.cache             = !!*kiblnd_tunables.kib_fmr_cache};
 	int rc;
 
 	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1454,7 +1453,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
 				   int flush_trigger)
 {
 	kib_fmr_pool_t *fpo;
-	int	     rc;
+	int rc;
 
 	memset(fps, 0, sizeof(kib_fmr_poolset_t));
 
@@ -1485,11 +1484,11 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
 {
 	LIST_HEAD(zombies);
-	kib_fmr_pool_t    *fpo = fmr->fmr_pool;
+	kib_fmr_pool_t *fpo = fmr->fmr_pool;
 	kib_fmr_poolset_t *fps = fpo->fpo_owner;
-	unsigned long	 now = cfs_time_current();
-	kib_fmr_pool_t    *tmp;
-	int		rc;
+	unsigned long now = cfs_time_current();
+	kib_fmr_pool_t *tmp;
+	int rc;
 
 	rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
 	LASSERT(rc == 0);
@@ -1525,9 +1524,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
 			__u64 iov, kib_fmr_t *fmr)
 {
 	struct ib_pool_fmr *pfmr;
-	kib_fmr_pool_t     *fpo;
-	__u64	       version;
-	int		 rc;
+	kib_fmr_pool_t *fpo;
+	__u64 version;
+	int rc;
 
  again:
 	spin_lock(&fps->fps_lock);
@@ -1658,13 +1657,13 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
 			       kib_ps_node_init_t nd_init,
 			       kib_ps_node_fini_t nd_fini)
 {
-	kib_pool_t	*pool;
-	int		rc;
+	kib_pool_t *pool;
+	int rc;
 
 	memset(ps, 0, sizeof(kib_poolset_t));
 
-	ps->ps_cpt	    = cpt;
-	ps->ps_net	  = net;
+	ps->ps_cpt          = cpt;
+	ps->ps_net          = net;
 	ps->ps_pool_create  = po_create;
 	ps->ps_pool_destroy = po_destroy;
 	ps->ps_node_init    = nd_init;
@@ -1698,9 +1697,9 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
 {
 	LIST_HEAD(zombies);
-	kib_poolset_t  *ps = pool->po_owner;
-	kib_pool_t     *tmp;
-	unsigned long      now = cfs_time_current();
+	kib_poolset_t *ps = pool->po_owner;
+	kib_pool_t *tmp;
+	unsigned long now = cfs_time_current();
 
 	spin_lock(&ps->ps_lock);
 
@@ -1727,9 +1726,9 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
 
 struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
 {
-	struct list_head	    *node;
-	kib_pool_t	    *pool;
-	int		    rc;
+	struct list_head *node;
+	kib_pool_t *pool;
+	int rc;
 
  again:
 	spin_lock(&ps->ps_lock);
@@ -1789,8 +1788,8 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
 
 void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
 {
-	kib_pmr_pool_t      *ppo = pmr->pmr_pool;
-	struct ib_mr	*mr  = pmr->pmr_mr;
+	kib_pmr_pool_t *ppo = pmr->pmr_pool;
+	struct ib_mr *mr = pmr->pmr_mr;
 
 	pmr->pmr_mr = NULL;
 	kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
@@ -1802,9 +1801,9 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
 		    kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
 {
 	kib_phys_mr_t *pmr;
-	struct list_head    *node;
-	int	    rc;
-	int	    i;
+	struct list_head *node;
+	int rc;
+	int i;
 
 	node = kiblnd_pool_alloc_node(&pps->pps_poolset);
 	if (node == NULL) {
@@ -1846,7 +1845,7 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
 static void kiblnd_destroy_pmr_pool(kib_pool_t *pool)
 {
 	kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
-	kib_phys_mr_t  *pmr;
+	kib_phys_mr_t *pmr;
 	kib_phys_mr_t *tmp;
 
 	LASSERT(pool->po_allocated == 0);
@@ -1881,10 +1880,10 @@ static inline int kiblnd_pmr_pool_size(int ncpts)
 static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
 				  kib_pool_t **pp_po)
 {
-	struct kib_pmr_pool	*ppo;
-	struct kib_pool		*pool;
-	kib_phys_mr_t		*pmr;
-	int			i;
+	struct kib_pmr_pool *ppo;
+	struct kib_pool *pool;
+	kib_phys_mr_t *pmr;
+	int i;
 
 	LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
 			 ps->ps_cpt, sizeof(kib_pmr_pool_t));
@@ -1923,8 +1922,8 @@ static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
 
 static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
 {
-	kib_tx_pool_t  *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
-	int	     i;
+	kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
+	int i;
 
 	LASSERT(pool->po_allocated == 0);
 
@@ -1979,9 +1978,9 @@ static int kiblnd_tx_pool_size(int ncpts)
 static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
 				 kib_pool_t **pp_po)
 {
-	int	    i;
-	int	    npg;
-	kib_pool_t    *pool;
+	int i;
+	int npg;
+	kib_pool_t *pool;
 	kib_tx_pool_t *tpo;
 
 	LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
@@ -2064,19 +2063,19 @@ static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
 {
 	kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
 					     tps_poolset);
-	kib_tx_t	 *tx  = list_entry(node, kib_tx_t, tx_list);
+	kib_tx_t *tx  = list_entry(node, kib_tx_t, tx_list);
 
 	tx->tx_cookie = tps->tps_next_tx_cookie++;
 }
 
 static void kiblnd_net_fini_pools(kib_net_t *net)
 {
-	int	i;
+	int i;
 
 	cfs_cpt_for_each(i, lnet_cpt_table()) {
-		kib_tx_poolset_t	*tps;
-		kib_fmr_poolset_t	*fps;
-		kib_pmr_poolset_t	*pps;
+		kib_tx_poolset_t *tps;
+		kib_fmr_poolset_t *fps;
+		kib_pmr_poolset_t *pps;
 
 		if (net->ibn_tx_ps != NULL) {
 			tps = net->ibn_tx_ps[i];
@@ -2112,16 +2111,15 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
 
 static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
 {
-	unsigned long	flags;
-	int		cpt;
-	int		rc;
-	int		i;
+	unsigned long flags;
+	int cpt;
+	int rc;
+	int i;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 	if (*kiblnd_tunables.kib_map_on_demand == 0 &&
 	    net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
-		read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-					   flags);
+		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 		goto create_tx_pool;
 	}
 
@@ -2241,7 +2239,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
 static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
 {
 	struct ib_device_attr *attr;
-	int		    rc;
+	int rc;
 
 	/* It's safe to assume a HCA can handle a page size
 	 * matching that of the native system */
@@ -2284,7 +2282,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
 
 static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
 {
-	int     i;
+	int i;
 
 	if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
 		return;
@@ -2317,12 +2315,11 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
 static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 {
 	struct ib_mr *mr;
-	int	   i;
-	int	   rc;
-	__u64	 mm_size;
-	__u64	 mr_size;
-	int	   acflags = IB_ACCESS_LOCAL_WRITE |
-				IB_ACCESS_REMOTE_WRITE;
+	int i;
+	int rc;
+	__u64 mm_size;
+	__u64 mr_size;
+	int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 
 	rc = kiblnd_hdev_get_attr(hdev);
 	if (rc != 0)
@@ -2371,11 +2368,11 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 	for (i = 0; i < hdev->ibh_nmrs; i++) {
 		struct ib_phys_buf ipb;
-		__u64	      iova;
+		__u64 iova;
 
 		ipb.size = hdev->ibh_mr_size;
 		ipb.addr = i * mr_size;
-		iova     = ipb.addr;
+		iova = ipb.addr;
 
 		mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
 		if (IS_ERR(mr)) {
@@ -2406,10 +2403,10 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
 
 static int kiblnd_dev_need_failover(kib_dev_t *dev)
 {
-	struct rdma_cm_id  *cmid;
-	struct sockaddr_in  srcaddr;
-	struct sockaddr_in  dstaddr;
-	int		 rc;
+	struct rdma_cm_id *cmid;
+	struct sockaddr_in srcaddr;
+	struct sockaddr_in dstaddr;
+	int rc;
 
 	if (dev->ibd_hdev == NULL || /* initializing */
 	    dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
@@ -2435,7 +2432,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
 	}
 
 	memset(&srcaddr, 0, sizeof(srcaddr));
-	srcaddr.sin_family      = AF_INET;
+	srcaddr.sin_family = AF_INET;
 	srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
 
 	memset(&dstaddr, 0, sizeof(dstaddr));
@@ -2464,15 +2461,15 @@ int kiblnd_dev_failover(kib_dev_t *dev)
 	LIST_HEAD(zombie_tpo);
 	LIST_HEAD(zombie_ppo);
 	LIST_HEAD(zombie_fpo);
-	struct rdma_cm_id  *cmid  = NULL;
-	kib_hca_dev_t      *hdev  = NULL;
-	kib_hca_dev_t      *old;
-	struct ib_pd       *pd;
-	kib_net_t	  *net;
-	struct sockaddr_in  addr;
-	unsigned long       flags;
-	int		 rc = 0;
-	int		    i;
+	struct rdma_cm_id *cmid  = NULL;
+	kib_hca_dev_t *hdev  = NULL;
+	kib_hca_dev_t *old;
+	struct ib_pd *pd;
+	kib_net_t *net;
+	struct sockaddr_in addr;
+	unsigned long flags;
+	int rc = 0;
+	int i;
 
 	LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
 		 dev->ibd_can_failover ||
@@ -2614,11 +2611,11 @@ void kiblnd_destroy_dev(kib_dev_t *dev)
 static kib_dev_t *kiblnd_create_dev(char *ifname)
 {
 	struct net_device *netdev;
-	kib_dev_t	 *dev;
-	__u32	      netmask;
-	__u32	      ip;
-	int		up;
-	int		rc;
+	kib_dev_t *dev;
+	__u32 netmask;
+	__u32 ip;
+	int up;
+	int rc;
 
 	rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
 	if (rc != 0) {
@@ -2665,8 +2662,8 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
 
 static void kiblnd_base_shutdown(void)
 {
-	struct kib_sched_info	*sched;
-	int			i;
+	struct kib_sched_info *sched;
+	int i;
 
 	LASSERT(list_empty(&kiblnd_data.kib_devs));
 
@@ -2732,10 +2729,10 @@ static void kiblnd_base_shutdown(void)
 
 void kiblnd_shutdown(lnet_ni_t *ni)
 {
-	kib_net_t	*net = ni->ni_data;
-	rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
-	int	       i;
-	unsigned long     flags;
+	kib_net_t *net = ni->ni_data;
+	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+	int i;
+	unsigned long flags;
 
 	LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
 
@@ -2804,9 +2801,9 @@ out:
 
 static int kiblnd_base_startup(void)
 {
-	struct kib_sched_info	*sched;
-	int			rc;
-	int			i;
+	struct kib_sched_info *sched;
+	int rc;
+	int i;
 
 	LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
 
@@ -2821,8 +2818,7 @@ static int kiblnd_base_startup(void)
 
 	kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
 	LIBCFS_ALLOC(kiblnd_data.kib_peers,
-		     sizeof(struct list_head) *
-			    kiblnd_data.kib_peer_hash_size);
+		     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
 	if (kiblnd_data.kib_peers == NULL)
 		goto failed;
 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
@@ -2840,7 +2836,7 @@ static int kiblnd_base_startup(void)
 		goto failed;
 
 	cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
-		int	nthrs;
+		int nthrs;
 
 		spin_lock_init(&sched->ibs_lock);
 		INIT_LIST_HEAD(&sched->ibs_conns);
@@ -2893,9 +2889,9 @@ static int kiblnd_base_startup(void)
 
 static int kiblnd_start_schedulers(struct kib_sched_info *sched)
 {
-	int	rc = 0;
-	int	nthrs;
-	int	i;
+	int rc = 0;
+	int nthrs;
+	int i;
 
 	if (sched->ibs_nthreads == 0) {
 		if (*kiblnd_tunables.kib_nscheds > 0) {
@@ -2913,8 +2909,8 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
 	}
 
 	for (i = 0; i < nthrs; i++) {
-		long	id;
-		char	name[20];
+		long id;
+		char name[20];
 
 		id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
 		snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
@@ -2935,9 +2931,9 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
 static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
 				    int ncpts)
 {
-	int	cpt;
-	int	rc;
-	int	i;
+	int cpt;
+	int rc;
+	int i;
 
 	for (i = 0; i < ncpts; i++) {
 		struct kib_sched_info *sched;
@@ -2960,10 +2956,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
 
 static kib_dev_t *kiblnd_dev_search(char *ifname)
 {
-	kib_dev_t	*alias = NULL;
-	kib_dev_t	*dev;
-	char		*colon;
-	char		*colon2;
+	kib_dev_t *alias = NULL;
+	kib_dev_t *dev;
+	char *colon;
+	char *colon2;
 
 	colon = strchr(ifname, ':');
 	list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
@@ -2992,13 +2988,13 @@ static kib_dev_t *kiblnd_dev_search(char *ifname)
 
 int kiblnd_startup(lnet_ni_t *ni)
 {
-	char		     *ifname;
-	kib_dev_t		*ibdev = NULL;
-	kib_net_t		*net;
-	struct timeval	    tv;
-	unsigned long	     flags;
-	int		       rc;
-	int			  newdev;
+	char *ifname;
+	kib_dev_t *ibdev = NULL;
+	kib_net_t *net;
+	struct timeval tv;
+	unsigned long flags;
+	int rc;
+	int newdev;
 
 	LASSERT(ni->ni_lnd == &the_o2iblnd);
 
@@ -3091,7 +3087,7 @@ static void __exit kiblnd_module_fini(void)
 
 static int __init kiblnd_module_init(void)
 {
-	int    rc;
+	int rc;
 
 	CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
 	CLASSERT(offsetof(kib_msg_t,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index cd664d0..7f52c69 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -80,42 +80,47 @@
 #define IBLND_N_SCHED_HIGH		4
 
 typedef struct {
-	int	      *kib_dev_failover;     /* HCA failover */
-	unsigned int     *kib_service;	  /* IB service number */
-	int	      *kib_min_reconnect_interval; /* first failed connection retry... */
-	int	      *kib_max_reconnect_interval; /* ...exponentially increasing to this */
-	int	      *kib_cksum;	    /* checksum kib_msg_t? */
-	int	      *kib_timeout;	  /* comms timeout (seconds) */
-	int	      *kib_keepalive;	/* keepalive timeout (seconds) */
-	int	      *kib_ntx;	      /* # tx descs */
-	int	      *kib_credits;	  /* # concurrent sends */
-	int	      *kib_peertxcredits;    /* # concurrent sends to 1 peer */
-	int	      *kib_peerrtrcredits;   /* # per-peer router buffer credits */
-	int	      *kib_peercredits_hiw;  /* # when eagerly to return credits */
-	int	      *kib_peertimeout;      /* seconds to consider peer dead */
-	char	    **kib_default_ipif;     /* default IPoIB interface */
-	int	      *kib_retry_count;
-	int	      *kib_rnr_retry_count;
-	int	      *kib_concurrent_sends; /* send work queue sizing */
-	int		 *kib_ib_mtu;		/* IB MTU */
-	int	      *kib_map_on_demand;    /* map-on-demand if RD has more fragments
-						 * than this value, 0 disable map-on-demand */
-	int	      *kib_pmr_pool_size;    /* # physical MR in pool */
-	int	      *kib_fmr_pool_size;    /* # FMRs in pool */
-	int	      *kib_fmr_flush_trigger; /* When to trigger FMR flush */
-	int	      *kib_fmr_cache;	/* enable FMR pool cache? */
-	int	      *kib_require_priv_port;/* accept only privileged ports */
-	int	      *kib_use_priv_port;    /* use privileged port for active connect */
-	/* # threads on each CPT */
-	int		 *kib_nscheds;
+	int          *kib_dev_failover;      /* HCA failover */
+	unsigned int *kib_service;           /* IB service number */
+	int          *kib_min_reconnect_interval; /* first failed connection
+						   * retry... */
+	int          *kib_max_reconnect_interval; /* ...exponentially increasing
+						   * to this */
+	int          *kib_cksum;             /* checksum kib_msg_t? */
+	int          *kib_timeout;           /* comms timeout (seconds) */
+	int          *kib_keepalive;         /* keepalive timeout (seconds) */
+	int          *kib_ntx;               /* # tx descs */
+	int          *kib_credits;           /* # concurrent sends */
+	int          *kib_peertxcredits;     /* # concurrent sends to 1 peer */
+	int          *kib_peerrtrcredits;    /* # per-peer router buffer
+					      * credits */
+	int          *kib_peercredits_hiw;   /* # when eagerly to return
+					      * credits */
+	int          *kib_peertimeout;       /* seconds to consider peer dead */
+	char         **kib_default_ipif;     /* default IPoIB interface */
+	int          *kib_retry_count;
+	int          *kib_rnr_retry_count;
+	int          *kib_concurrent_sends;  /* send work queue sizing */
+	int          *kib_ib_mtu;            /* IB MTU */
+	int          *kib_map_on_demand;     /* map-on-demand if RD has more
+					      * fragments than this value, 0
+					      * disable map-on-demand */
+	int          *kib_pmr_pool_size;     /* # physical MR in pool */
+	int          *kib_fmr_pool_size;     /* # FMRs in pool */
+	int          *kib_fmr_flush_trigger; /* When to trigger FMR flush */
+	int          *kib_fmr_cache;         /* enable FMR pool cache? */
+	int          *kib_require_priv_port; /* accept only privileged ports */
+	int          *kib_use_priv_port;     /* use privileged port for active
+					      * connect */
+	int          *kib_nscheds;           /* # threads on each CPT */
 } kib_tunables_t;
 
 extern kib_tunables_t  kiblnd_tunables;
 
-#define IBLND_MSG_QUEUE_SIZE_V1      8	  /* V1 only : # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER_V1    7	  /* V1 only : when eagerly to return credits */
+#define IBLND_MSG_QUEUE_SIZE_V1   8 /* V1 only : # messages/RDMAs in-flight */
+#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
 
-#define IBLND_CREDITS_DEFAULT	8	  /* default # of peer credits */
+#define IBLND_CREDITS_DEFAULT     8 /* default # of peer credits */
 #define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
 
 #define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
@@ -186,34 +191,36 @@ struct kib_hca_dev;
 #endif
 
 typedef struct {
-	struct list_head	   ibd_list;	  /* chain on kib_devs */
-	struct list_head	   ibd_fail_list;     /* chain on kib_failed_devs */
-	__u32		ibd_ifip;	  /* IPoIB interface IP */
-	/** IPoIB interface name */
-	char		 ibd_ifname[KIB_IFNAME_SIZE];
-	int		  ibd_nnets;	 /* # nets extant */
-
-	unsigned long	   ibd_next_failover;
-	int		  ibd_failed_failover; /* # failover failures */
-	unsigned int	 ibd_failover;      /* failover in progress */
-	unsigned int	 ibd_can_failover;  /* IPoIB interface is a bonding master */
-	struct list_head	   ibd_nets;
-	struct kib_hca_dev  *ibd_hdev;
+	struct list_head   ibd_list;            /* chain on kib_devs */
+	struct list_head   ibd_fail_list;       /* chain on kib_failed_devs */
+	__u32              ibd_ifip;            /* IPoIB interface IP */
+
+	/* IPoIB interface name */
+	char               ibd_ifname[KIB_IFNAME_SIZE];
+	int                ibd_nnets;           /* # nets extant */
+
+	unsigned long      ibd_next_failover;
+	int                ibd_failed_failover; /* # failover failures */
+	unsigned int       ibd_failover;        /* failover in progress */
+	unsigned int       ibd_can_failover;    /* IPoIB interface is a bonding
+						 * master */
+	struct list_head   ibd_nets;
+	struct kib_hca_dev *ibd_hdev;
 } kib_dev_t;
 
 typedef struct kib_hca_dev {
-	struct rdma_cm_id   *ibh_cmid;	  /* listener cmid */
-	struct ib_device    *ibh_ibdev;	 /* IB device */
-	int		  ibh_page_shift;    /* page shift of current HCA */
-	int		  ibh_page_size;     /* page size of current HCA */
-	__u64		ibh_page_mask;     /* page mask of current HCA */
-	int		  ibh_mr_shift;      /* bits shift of max MR size */
-	__u64		ibh_mr_size;       /* size of MR */
-	int		  ibh_nmrs;	  /* # of global MRs */
-	struct ib_mr       **ibh_mrs;	   /* global MR */
-	struct ib_pd	*ibh_pd;	    /* PD */
-	kib_dev_t	   *ibh_dev;	   /* owner */
-	atomic_t	 ibh_ref;	   /* refcount */
+	struct rdma_cm_id  *ibh_cmid;           /* listener cmid */
+	struct ib_device   *ibh_ibdev;          /* IB device */
+	int                ibh_page_shift;      /* page shift of current HCA */
+	int                ibh_page_size;       /* page size of current HCA */
+	__u64              ibh_page_mask;       /* page mask of current HCA */
+	int                ibh_mr_shift;        /* bits shift of max MR size */
+	__u64              ibh_mr_size;         /* size of MR */
+	int                ibh_nmrs;            /* # of global MRs */
+	struct ib_mr       **ibh_mrs;           /* global MR */
+	struct ib_pd       *ibh_pd;             /* PD */
+	kib_dev_t          *ibh_dev;            /* owner */
+	atomic_t           ibh_ref;             /* refcount */
 } kib_hca_dev_t;
 
 /** # of seconds to keep pool alive */
@@ -222,19 +229,19 @@ typedef struct kib_hca_dev {
 #define IBLND_POOL_RETRY	1
 
 typedef struct {
-	int		     ibp_npages;	     /* # pages */
-	struct page	    *ibp_pages[0];	   /* page array */
+	int                ibp_npages;          /* # pages */
+	struct page        *ibp_pages[0];       /* page array */
 } kib_pages_t;
 
 struct kib_pmr_pool;
 
 typedef struct {
-	struct list_head	      pmr_list;	       /* chain node */
-	struct ib_phys_buf     *pmr_ipb;		/* physical buffer */
-	struct ib_mr	   *pmr_mr;		 /* IB MR */
-	struct kib_pmr_pool    *pmr_pool;	       /* owner of this MR */
-	__u64		   pmr_iova;	       /* Virtual I/O address */
-	int		     pmr_refcount;	   /* reference count */
+	struct list_head    pmr_list;           /* chain node */
+	struct ib_phys_buf  *pmr_ipb;           /* physical buffer */
+	struct ib_mr        *pmr_mr;            /* IB MR */
+	struct kib_pmr_pool *pmr_pool;          /* owner of this MR */
+	__u64               pmr_iova;           /* Virtual I/O address */
+	int                 pmr_refcount;       /* reference count */
 } kib_phys_mr_t;
 
 struct kib_pool;
@@ -251,97 +258,99 @@ struct kib_net;
 #define IBLND_POOL_NAME_LEN     32
 
 typedef struct kib_poolset {
-	spinlock_t		ps_lock;		/* serialize */
-	struct kib_net	 *ps_net;		 /* network it belongs to */
-	char		    ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
-	struct list_head	      ps_pool_list;	   /* list of pools */
-	struct list_head	      ps_failed_pool_list;    /* failed pool list */
-	unsigned long	      ps_next_retry;	  /* time stamp for retry if failed to allocate */
-	int		     ps_increasing;	  /* is allocating new pool */
-	int		     ps_pool_size;	   /* new pool size */
-	int			ps_cpt;			/* CPT id */
-
-	kib_ps_pool_create_t    ps_pool_create;	 /* create a new pool */
-	kib_ps_pool_destroy_t   ps_pool_destroy;	/* destroy a pool */
-	kib_ps_node_init_t      ps_node_init;	   /* initialize new allocated node */
-	kib_ps_node_fini_t      ps_node_fini;	   /* finalize node */
+	spinlock_t            ps_lock;            /* serialize */
+	struct kib_net        *ps_net;            /* network it belongs to */
+	char                  ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
+	struct list_head      ps_pool_list;       /* list of pools */
+	struct list_head      ps_failed_pool_list;/* failed pool list */
+	unsigned long         ps_next_retry;      /* time stamp for retry if
+						   * failed to allocate */
+	int                   ps_increasing;      /* is allocating new pool */
+	int                   ps_pool_size;       /* new pool size */
+	int                   ps_cpt;             /* CPT id */
+
+	kib_ps_pool_create_t  ps_pool_create;     /* create a new pool */
+	kib_ps_pool_destroy_t ps_pool_destroy;    /* destroy a pool */
+	kib_ps_node_init_t    ps_node_init;       /* initialize new allocated
+						   * node */
+	kib_ps_node_fini_t    ps_node_fini;       /* finalize node */
 } kib_poolset_t;
 
 typedef struct kib_pool {
-	struct list_head	      po_list;		/* chain on pool list */
-	struct list_head	      po_free_list;	   /* pre-allocated node */
-	kib_poolset_t	  *po_owner;	       /* pool_set of this pool */
-	unsigned long	      po_deadline;	    /* deadline of this pool */
-	int		     po_allocated;	   /* # of elements in use */
-	int		     po_failed;	      /* pool is created on failed HCA */
-	int		     po_size;		/* # of pre-allocated elements */
+	struct list_head      po_list;         /* chain on pool list */
+	struct list_head      po_free_list;    /* pre-allocated node */
+	kib_poolset_t         *po_owner;       /* pool_set of this pool */
+	unsigned long         po_deadline;     /* deadline of this pool */
+	int                   po_allocated;    /* # of elements in use */
+	int                   po_failed;       /* pool is created on failed
+						* HCA */
+	int                   po_size;         /* # of pre-allocated elements */
 } kib_pool_t;
 
 typedef struct {
-	kib_poolset_t	   tps_poolset;	    /* pool-set */
-	__u64		   tps_next_tx_cookie;     /* cookie of TX */
+	kib_poolset_t         tps_poolset;        /* pool-set */
+	__u64                 tps_next_tx_cookie; /* cookie of TX */
 } kib_tx_poolset_t;
 
 typedef struct {
-	kib_pool_t	      tpo_pool;	       /* pool */
-	struct kib_hca_dev     *tpo_hdev;	       /* device for this pool */
-	struct kib_tx	  *tpo_tx_descs;	   /* all the tx descriptors */
-	kib_pages_t	    *tpo_tx_pages;	   /* premapped tx msg pages */
+	kib_pool_t            tpo_pool;           /* pool */
+	struct kib_hca_dev    *tpo_hdev;          /* device for this pool */
+	struct kib_tx         *tpo_tx_descs;      /* all the tx descriptors */
+	kib_pages_t           *tpo_tx_pages;      /* premapped tx msg pages */
 } kib_tx_pool_t;
 
 typedef struct {
-	kib_poolset_t	   pps_poolset;	    /* pool-set */
+	kib_poolset_t         pps_poolset;        /* pool-set */
 } kib_pmr_poolset_t;
 
 typedef struct kib_pmr_pool {
-	struct kib_hca_dev     *ppo_hdev;	       /* device for this pool */
-	kib_pool_t	      ppo_pool;	       /* pool */
+	struct kib_hca_dev    *ppo_hdev;          /* device for this pool */
+	kib_pool_t            ppo_pool;           /* pool */
 } kib_pmr_pool_t;
 
 typedef struct {
-	spinlock_t		fps_lock;		/* serialize */
-	struct kib_net	 *fps_net;		/* IB network */
-	struct list_head	      fps_pool_list;	  /* FMR pool list */
-	struct list_head	      fps_failed_pool_list;   /* FMR pool list */
-	__u64		   fps_version;	    /* validity stamp */
-	int			fps_cpt;		/* CPT id */
-	int			fps_pool_size;
-	int			fps_flush_trigger;
-	/* is allocating new pool */
-	int			fps_increasing;
-	/* time stamp for retry if failed to allocate */
-	unsigned long		fps_next_retry;
+	spinlock_t            fps_lock;            /* serialize */
+	struct kib_net        *fps_net;            /* IB network */
+	struct list_head      fps_pool_list;       /* FMR pool list */
+	struct list_head      fps_failed_pool_list;/* FMR pool list */
+	__u64                 fps_version;         /* validity stamp */
+	int                   fps_cpt;             /* CPT id */
+	int                   fps_pool_size;
+	int                   fps_flush_trigger;
+	int                   fps_increasing;      /* is allocating new pool */
+	unsigned long         fps_next_retry;      /* time stamp for retry if
+						    * failed to allocate */
 } kib_fmr_poolset_t;
 
 typedef struct {
-	struct list_head	      fpo_list;	       /* chain on pool list */
-	struct kib_hca_dev     *fpo_hdev;	       /* device for this pool */
-	kib_fmr_poolset_t      *fpo_owner;	      /* owner of this pool */
-	struct ib_fmr_pool     *fpo_fmr_pool;	   /* IB FMR pool */
-	unsigned long	      fpo_deadline;	   /* deadline of this pool */
-	int		     fpo_failed;	     /* fmr pool is failed */
-	int		     fpo_map_count;	  /* # of mapped FMR */
+	struct list_head      fpo_list;            /* chain on pool list */
+	struct kib_hca_dev    *fpo_hdev;           /* device for this pool */
+	kib_fmr_poolset_t     *fpo_owner;          /* owner of this pool */
+	struct ib_fmr_pool    *fpo_fmr_pool;       /* IB FMR pool */
+	unsigned long         fpo_deadline;        /* deadline of this pool */
+	int                   fpo_failed;          /* fmr pool is failed */
+	int                   fpo_map_count;       /* # of mapped FMR */
 } kib_fmr_pool_t;
 
 typedef struct {
-	struct ib_pool_fmr     *fmr_pfmr;	       /* IB pool fmr */
-	kib_fmr_pool_t	 *fmr_pool;	       /* pool of FMR */
+	struct ib_pool_fmr    *fmr_pfmr;           /* IB pool fmr */
+	kib_fmr_pool_t        *fmr_pool;           /* pool of FMR */
 } kib_fmr_t;
 
 typedef struct kib_net {
-	struct list_head	   ibn_list;	  /* chain on kib_dev_t::ibd_nets */
-	__u64		ibn_incarnation;   /* my epoch */
-	int		  ibn_init;	  /* initialisation state */
-	int		  ibn_shutdown;      /* shutting down? */
+	struct list_head      ibn_list;       /* chain on kib_dev_t::ibd_nets */
+	__u64                 ibn_incarnation;/* my epoch */
+	int                   ibn_init;       /* initialisation state */
+	int                   ibn_shutdown;   /* shutting down? */
 
-	atomic_t		ibn_npeers;	/* # peers extant */
-	atomic_t		ibn_nconns;	/* # connections extant */
+	atomic_t              ibn_npeers;     /* # peers extant */
+	atomic_t              ibn_nconns;     /* # connections extant */
 
-	kib_tx_poolset_t	**ibn_tx_ps;	/* tx pool-set */
-	kib_fmr_poolset_t	**ibn_fmr_ps;	/* fmr pool-set */
-	kib_pmr_poolset_t	**ibn_pmr_ps;	/* pmr pool-set */
+	kib_tx_poolset_t      **ibn_tx_ps;    /* tx pool-set */
+	kib_fmr_poolset_t     **ibn_fmr_ps;   /* fmr pool-set */
+	kib_pmr_poolset_t     **ibn_pmr_ps;   /* pmr pool-set */
 
-	kib_dev_t		*ibn_dev;	/* underlying IB device */
+	kib_dev_t             *ibn_dev;       /* underlying IB device */
 } kib_net_t;
 
 #define KIB_THREAD_SHIFT		16
@@ -350,51 +359,45 @@ typedef struct kib_net {
 #define KIB_THREAD_TID(id)		((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
 
 struct kib_sched_info {
-	/* serialise */
-	spinlock_t		ibs_lock;
-	/* schedulers sleep here */
-	wait_queue_head_t		ibs_waitq;
-	/* conns to check for rx completions */
-	struct list_head		ibs_conns;
-	/* number of scheduler threads */
-	int			ibs_nthreads;
-	/* max allowed scheduler threads */
-	int			ibs_nthreads_max;
-	int			ibs_cpt;	/* CPT id */
+	spinlock_t         ibs_lock;     /* serialise */
+	wait_queue_head_t  ibs_waitq;    /* schedulers sleep here */
+	struct list_head   ibs_conns;    /* conns to check for rx completions */
+	int                ibs_nthreads; /* number of scheduler threads */
+	int                ibs_nthreads_max; /* max allowed scheduler threads */
+	int                ibs_cpt;      /* CPT id */
 };
 
 typedef struct {
-	int			kib_init;	/* initialisation state */
-	int			kib_shutdown;	/* shut down? */
-	struct list_head		kib_devs;	/* IB devices extant */
-	/* list head of failed devices */
-	struct list_head		kib_failed_devs;
-	/* schedulers sleep here */
-	wait_queue_head_t		kib_failover_waitq;
-	atomic_t		kib_nthreads;	/* # live threads */
-	/* stabilize net/dev/peer/conn ops */
-	rwlock_t		kib_global_lock;
-	/* hash table of all my known peers */
-	struct list_head		*kib_peers;
-	/* size of kib_peers */
-	int			kib_peer_hash_size;
-	/* the connd task (serialisation assertions) */
-	void			*kib_connd;
-	/* connections to setup/teardown */
-	struct list_head		kib_connd_conns;
-	/* connections with zero refcount */
-	struct list_head		kib_connd_zombies;
-	/* connection daemon sleeps here */
-	wait_queue_head_t		kib_connd_waitq;
-	spinlock_t		kib_connd_lock;	/* serialise */
-	struct ib_qp_attr	kib_error_qpa;	/* QP->ERROR */
-	/* percpt data for schedulers */
-	struct kib_sched_info	**kib_scheds;
+	int                   kib_init;           /* initialisation state */
+	int                   kib_shutdown;       /* shut down? */
+	struct list_head      kib_devs;           /* IB devices extant */
+	struct list_head      kib_failed_devs;    /* list head of failed
+						   * devices */
+	wait_queue_head_t     kib_failover_waitq; /* schedulers sleep here */
+	atomic_t              kib_nthreads;       /* # live threads */
+	rwlock_t              kib_global_lock;    /* stabilize net/dev/peer/conn
+						   * ops */
+	struct list_head      *kib_peers;         /* hash table of all my known
+						   * peers */
+	int                   kib_peer_hash_size; /* size of kib_peers */
+	void                  *kib_connd;         /* the connd task
+						   * (serialisation assertions)
+						   */
+	struct list_head      kib_connd_conns;    /* connections to
+						   * setup/teardown */
+	struct list_head      kib_connd_zombies;  /* connections with zero
+						   * refcount */
+	wait_queue_head_t     kib_connd_waitq;    /* connection daemon sleeps
+						   * here */
+	spinlock_t            kib_connd_lock;     /* serialise */
+	struct ib_qp_attr     kib_error_qpa;      /* QP->ERROR */
+	struct kib_sched_info **kib_scheds;       /* percpt data for schedulers
+						   */
 } kib_data_t;
 
-#define IBLND_INIT_NOTHING	 0
-#define IBLND_INIT_DATA	    1
-#define IBLND_INIT_ALL	     2
+#define IBLND_INIT_NOTHING 0
+#define IBLND_INIT_DATA    1
+#define IBLND_INIT_ALL     2
 
 /************************************************************************
  * IB Wire message format.
@@ -402,228 +405,243 @@ typedef struct {
  */
 
 typedef struct kib_connparams {
-	__u16	     ibcp_queue_depth;
-	__u16	     ibcp_max_frags;
-	__u32	     ibcp_max_msg_size;
+	__u16        ibcp_queue_depth;
+	__u16        ibcp_max_frags;
+	__u32        ibcp_max_msg_size;
 } WIRE_ATTR kib_connparams_t;
 
 typedef struct {
-	lnet_hdr_t	ibim_hdr;	     /* portals header */
-	char	      ibim_payload[0];      /* piggy-backed payload */
+	lnet_hdr_t   ibim_hdr;        /* portals header */
+	char         ibim_payload[0]; /* piggy-backed payload */
 } WIRE_ATTR kib_immediate_msg_t;
 
 typedef struct {
-	__u32	     rf_nob;	       /* # bytes this frag */
-	__u64	     rf_addr;	      /* CAVEAT EMPTOR: misaligned!! */
+	__u32        rf_nob;          /* # bytes this frag */
+	__u64        rf_addr;         /* CAVEAT EMPTOR: misaligned!! */
 } WIRE_ATTR kib_rdma_frag_t;
 
 typedef struct {
-	__u32	     rd_key;	       /* local/remote key */
-	__u32	     rd_nfrags;	    /* # fragments */
-	kib_rdma_frag_t   rd_frags[0];	  /* buffer frags */
+	__u32           rd_key;       /* local/remote key */
+	__u32           rd_nfrags;    /* # fragments */
+	kib_rdma_frag_t rd_frags[0];  /* buffer frags */
 } WIRE_ATTR kib_rdma_desc_t;
 
 typedef struct {
-	lnet_hdr_t	ibprm_hdr;	    /* portals header */
-	__u64	     ibprm_cookie;	 /* opaque completion cookie */
+	lnet_hdr_t      ibprm_hdr;    /* portals header */
+	__u64           ibprm_cookie; /* opaque completion cookie */
 } WIRE_ATTR kib_putreq_msg_t;
 
 typedef struct {
-	__u64	     ibpam_src_cookie;     /* reflected completion cookie */
-	__u64	     ibpam_dst_cookie;     /* opaque completion cookie */
-	kib_rdma_desc_t   ibpam_rd;	     /* sender's sink buffer */
+	__u64           ibpam_src_cookie; /* reflected completion cookie */
+	__u64           ibpam_dst_cookie; /* opaque completion cookie */
+	kib_rdma_desc_t ibpam_rd;         /* sender's sink buffer */
 } WIRE_ATTR kib_putack_msg_t;
 
 typedef struct {
-	lnet_hdr_t	ibgm_hdr;	     /* portals header */
-	__u64	     ibgm_cookie;	  /* opaque completion cookie */
-	kib_rdma_desc_t   ibgm_rd;	      /* rdma descriptor */
+	lnet_hdr_t      ibgm_hdr;     /* portals header */
+	__u64           ibgm_cookie;  /* opaque completion cookie */
+	kib_rdma_desc_t ibgm_rd;      /* rdma descriptor */
 } WIRE_ATTR kib_get_msg_t;
 
 typedef struct {
-	__u64	     ibcm_cookie;	  /* opaque completion cookie */
-	__s32	     ibcm_status;	  /* < 0 failure: >= 0 length */
+	__u64           ibcm_cookie;  /* opaque completion cookie */
+	__s32           ibcm_status;  /* < 0 failure: >= 0 length */
 } WIRE_ATTR kib_completion_msg_t;
 
 typedef struct {
 	/* First 2 fields fixed FOR ALL TIME */
-	__u32	     ibm_magic;	    /* I'm an ibnal message */
-	__u16	     ibm_version;	  /* this is my version number */
-
-	__u8	      ibm_type;	     /* msg type */
-	__u8	      ibm_credits;	  /* returned credits */
-	__u32	     ibm_nob;	      /* # bytes in whole message */
-	__u32	     ibm_cksum;	    /* checksum (0 = no checksum) */
-	__u64	     ibm_srcnid;	   /* sender's NID */
-	__u64	     ibm_srcstamp;	 /* sender's incarnation */
-	__u64	     ibm_dstnid;	   /* destination's NID */
-	__u64	     ibm_dststamp;	 /* destination's incarnation */
+	__u32           ibm_magic;    /* I'm an ibnal message */
+	__u16           ibm_version;  /* this is my version number */
+
+	__u8            ibm_type;     /* msg type */
+	__u8            ibm_credits;  /* returned credits */
+	__u32           ibm_nob;      /* # bytes in whole message */
+	__u32           ibm_cksum;    /* checksum (0 = no checksum) */
+	__u64           ibm_srcnid;   /* sender's NID */
+	__u64           ibm_srcstamp; /* sender's incarnation */
+	__u64           ibm_dstnid;   /* destination's NID */
+	__u64           ibm_dststamp; /* destination's incarnation */
 
 	union {
-		kib_connparams_t      connparams;
-		kib_immediate_msg_t   immediate;
-		kib_putreq_msg_t      putreq;
-		kib_putack_msg_t      putack;
-		kib_get_msg_t	 get;
-		kib_completion_msg_t  completion;
+		kib_connparams_t     connparams;
+		kib_immediate_msg_t  immediate;
+		kib_putreq_msg_t     putreq;
+		kib_putack_msg_t     putack;
+		kib_get_msg_t        get;
+		kib_completion_msg_t completion;
 	} WIRE_ATTR ibm_u;
 } WIRE_ATTR kib_msg_t;
 
-#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC	/* unique magic */
+#define IBLND_MSG_MAGIC     LNET_PROTO_IB_MAGIC /* unique magic */
 
-#define IBLND_MSG_VERSION_1	 0x11
-#define IBLND_MSG_VERSION_2	 0x12
-#define IBLND_MSG_VERSION	   IBLND_MSG_VERSION_2
+#define IBLND_MSG_VERSION_1 0x11
+#define IBLND_MSG_VERSION_2 0x12
+#define IBLND_MSG_VERSION   IBLND_MSG_VERSION_2
 
-#define IBLND_MSG_CONNREQ	   0xc0	/* connection request */
-#define IBLND_MSG_CONNACK	   0xc1	/* connection acknowledge */
-#define IBLND_MSG_NOOP	      0xd0	/* nothing (just credits) */
-#define IBLND_MSG_IMMEDIATE	 0xd1	/* immediate */
-#define IBLND_MSG_PUT_REQ	   0xd2	/* putreq (src->sink) */
-#define IBLND_MSG_PUT_NAK	   0xd3	/* completion (sink->src) */
-#define IBLND_MSG_PUT_ACK	   0xd4	/* putack (sink->src) */
-#define IBLND_MSG_PUT_DONE	  0xd5	/* completion (src->sink) */
-#define IBLND_MSG_GET_REQ	   0xd6	/* getreq (sink->src) */
-#define IBLND_MSG_GET_DONE	  0xd7	/* completion (src->sink: all OK) */
+#define IBLND_MSG_CONNREQ   0xc0	/* connection request */
+#define IBLND_MSG_CONNACK   0xc1	/* connection acknowledge */
+#define IBLND_MSG_NOOP      0xd0	/* nothing (just credits) */
+#define IBLND_MSG_IMMEDIATE 0xd1	/* immediate */
+#define IBLND_MSG_PUT_REQ   0xd2	/* putreq (src->sink) */
+#define IBLND_MSG_PUT_NAK   0xd3	/* completion (sink->src) */
+#define IBLND_MSG_PUT_ACK   0xd4	/* putack (sink->src) */
+#define IBLND_MSG_PUT_DONE  0xd5	/* completion (src->sink) */
+#define IBLND_MSG_GET_REQ   0xd6	/* getreq (sink->src) */
+#define IBLND_MSG_GET_DONE  0xd7	/* completion (src->sink: all OK) */
 
 typedef struct {
-	__u32	    ibr_magic;	     /* sender's magic */
-	__u16	    ibr_version;	   /* sender's version */
-	__u8	     ibr_why;	       /* reject reason */
-	__u8	     ibr_padding;	   /* padding */
-	__u64	    ibr_incarnation;       /* incarnation of peer */
-	kib_connparams_t ibr_cp;		/* connection parameters */
+	__u32            ibr_magic;       /* sender's magic */
+	__u16            ibr_version;     /* sender's version */
+	__u8             ibr_why;         /* reject reason */
+	__u8             ibr_padding;     /* padding */
+	__u64            ibr_incarnation; /* incarnation of peer */
+	kib_connparams_t ibr_cp;          /* connection parameters */
 } WIRE_ATTR kib_rej_t;
 
 /* connection rejection reasons */
-#define IBLND_REJECT_CONN_RACE       1	  /* You lost connection race */
-#define IBLND_REJECT_NO_RESOURCES    2	  /* Out of memory/conns etc */
-#define IBLND_REJECT_FATAL	   3	  /* Anything else */
-
-#define IBLND_REJECT_CONN_UNCOMPAT   4	  /* incompatible version peer */
-#define IBLND_REJECT_CONN_STALE      5	  /* stale peer */
-
-#define IBLND_REJECT_RDMA_FRAGS      6	  /* Fatal: peer's rdma frags can't match mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE  7	  /* Fatal: peer's msg queue size can't match mine */
+#define IBLND_REJECT_CONN_RACE      1 /* You lost connection race */
+#define IBLND_REJECT_NO_RESOURCES   2 /* Out of memory/conns etc */
+#define IBLND_REJECT_FATAL          3 /* Anything else */
+#define IBLND_REJECT_CONN_UNCOMPAT  4 /* incompatible version peer */
+#define IBLND_REJECT_CONN_STALE     5 /* stale peer */
+#define IBLND_REJECT_RDMA_FRAGS     6 /* Fatal: peer's rdma frags can't match
+				       * mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't
+				       * match mine */
 
 /***********************************************************************/
 
-typedef struct kib_rx			   /* receive message */
+typedef struct kib_rx                         /* receive message */
 {
-	struct list_head		rx_list;      /* queue for attention */
-	struct kib_conn	  *rx_conn;      /* owning conn */
-	int		       rx_nob;       /* # bytes received (-1 while posted) */
-	enum ib_wc_status	 rx_status;    /* completion status */
-	kib_msg_t		*rx_msg;       /* message buffer (host vaddr) */
-	__u64		     rx_msgaddr;   /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR   (rx_msgunmap); /* for dma_unmap_single() */
-	struct ib_recv_wr	 rx_wrq;       /* receive work item... */
-	struct ib_sge	     rx_sge;       /* ...and its memory */
+	struct list_head       rx_list;       /* queue for attention */
+	struct kib_conn        *rx_conn;      /* owning conn */
+	int                    rx_nob;        /* # bytes received (-1 while
+					       * posted) */
+	enum ib_wc_status      rx_status;     /* completion status */
+	kib_msg_t              *rx_msg;       /* message buffer (host vaddr) */
+	__u64                  rx_msgaddr;    /* message buffer (I/O addr) */
+	DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+	struct ib_recv_wr      rx_wrq;        /* receive work item... */
+	struct ib_sge          rx_sge;        /* ...and its memory */
 } kib_rx_t;
 
-#define IBLND_POSTRX_DONT_POST    0	     /* don't post */
-#define IBLND_POSTRX_NO_CREDIT    1	     /* post: no credits */
-#define IBLND_POSTRX_PEER_CREDIT  2	     /* post: give peer back 1 credit */
-#define IBLND_POSTRX_RSRVD_CREDIT 3	     /* post: give myself back 1 reserved credit */
+#define IBLND_POSTRX_DONT_POST    0   /* don't post */
+#define IBLND_POSTRX_NO_CREDIT    1   /* post: no credits */
+#define IBLND_POSTRX_PEER_CREDIT  2   /* post: give peer back 1 credit */
+#define IBLND_POSTRX_RSRVD_CREDIT 3   /* post: give myself back 1 reserved
+				       * credit */
 
-typedef struct kib_tx			   /* transmit message */
+typedef struct kib_tx                         /* transmit message */
 {
-	struct list_head		tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
-	kib_tx_pool_t	    *tx_pool;      /* pool I'm from */
-	struct kib_conn	  *tx_conn;      /* owning conn */
-	short		     tx_sending;   /* # tx callbacks outstanding */
-	short		     tx_queued;    /* queued for sending */
-	short		     tx_waiting;   /* waiting for peer */
-	int		       tx_status;    /* LNET completion status */
-	unsigned long	     tx_deadline;  /* completion deadline */
-	__u64		     tx_cookie;    /* completion cookie */
-	lnet_msg_t	       *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
-	kib_msg_t		*tx_msg;       /* message buffer (host vaddr) */
-	__u64		     tx_msgaddr;   /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR   (tx_msgunmap); /* for dma_unmap_single() */
-	int		       tx_nwrq;      /* # send work items */
-	struct ib_send_wr	*tx_wrq;       /* send work items... */
-	struct ib_sge	    *tx_sge;       /* ...and their memory */
-	kib_rdma_desc_t	  *tx_rd;	/* rdma descriptor */
-	int		       tx_nfrags;    /* # entries in... */
-	struct scatterlist       *tx_frags;     /* dma_map_sg descriptor */
-	__u64		    *tx_pages;     /* rdma phys page addrs */
+	struct list_head       tx_list;       /* queue on idle_txs ibc_tx_queue
+					       * etc. */
+	kib_tx_pool_t          *tx_pool;      /* pool I'm from */
+	struct kib_conn        *tx_conn;      /* owning conn */
+	short                  tx_sending;    /* # tx callbacks outstanding */
+	short                  tx_queued;     /* queued for sending */
+	short                  tx_waiting;    /* waiting for peer */
+	int                    tx_status;     /* LNET completion status */
+	unsigned long          tx_deadline;   /* completion deadline */
+	__u64                  tx_cookie;     /* completion cookie */
+	lnet_msg_t             *tx_lntmsg[2]; /* lnet msgs to finalize on
+					       * completion */
+	kib_msg_t              *tx_msg;       /* message buffer (host vaddr) */
+	__u64                  tx_msgaddr;    /* message buffer (I/O addr) */
+	DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+	int                    tx_nwrq;       /* # send work items */
+	struct ib_send_wr      *tx_wrq;       /* send work items... */
+	struct ib_sge          *tx_sge;       /* ...and their memory */
+	kib_rdma_desc_t        *tx_rd;        /* rdma descriptor */
+	int                    tx_nfrags;     /* # entries in... */
+	struct scatterlist     *tx_frags;     /* dma_map_sg descriptor */
+	__u64                  *tx_pages;     /* rdma phys page addrs */
 	union {
-		kib_phys_mr_t      *pmr;	/* MR for physical buffer */
-		kib_fmr_t	   fmr;	/* FMR */
-	}			 tx_u;
-	int		       tx_dmadir;    /* dma direction */
+		kib_phys_mr_t  *pmr;          /* MR for physical buffer */
+		kib_fmr_t      fmr;           /* FMR */
+	}                      tx_u;
+	int                    tx_dmadir;     /* dma direction */
 } kib_tx_t;
 
 typedef struct kib_connvars {
-	/* connection-in-progress variables */
-	kib_msg_t		 cv_msg;
+	kib_msg_t cv_msg; /* connection-in-progress variables */
 } kib_connvars_t;
 
 typedef struct kib_conn {
-	struct kib_sched_info *ibc_sched;	/* scheduler information */
-	struct kib_peer     *ibc_peer;	  /* owning peer */
-	kib_hca_dev_t       *ibc_hdev;	  /* HCA bound on */
-	struct list_head	   ibc_list;	  /* stash on peer's conn list */
-	struct list_head	   ibc_sched_list;    /* schedule for attention */
-	__u16		ibc_version;       /* version of connection */
-	__u64		ibc_incarnation;   /* which instance of the peer */
-	atomic_t	 ibc_refcount;      /* # users */
-	int		  ibc_state;	 /* what's happening */
-	int		  ibc_nsends_posted; /* # uncompleted sends */
-	int		  ibc_noops_posted;  /* # uncompleted NOOPs */
-	int		  ibc_credits;       /* # credits I have */
-	int		  ibc_outstanding_credits; /* # credits to return */
-	int		  ibc_reserved_credits;/* # ACK/DONE msg credits */
-	int		  ibc_comms_error;   /* set on comms error */
-	unsigned int	     ibc_nrx:16;	/* receive buffers owned */
-	unsigned int	     ibc_scheduled:1;   /* scheduled for attention */
-	unsigned int	     ibc_ready:1;       /* CQ callback fired */
-	/* time of last send */
-	unsigned long	ibc_last_send;
-	/** link chain for kiblnd_check_conns only */
-	struct list_head	   ibc_connd_list;
-	/** rxs completed before ESTABLISHED */
-	struct list_head	   ibc_early_rxs;
-	/** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
-	struct list_head	   ibc_tx_noops;
-	struct list_head	   ibc_tx_queue;       /* sends that need a credit */
-	struct list_head	   ibc_tx_queue_nocred;/* sends that don't need a credit */
-	struct list_head	   ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
-	struct list_head	   ibc_active_txs;     /* active tx awaiting completion */
-	spinlock_t	     ibc_lock;		 /* serialise */
-	kib_rx_t	    *ibc_rxs;	    /* the rx descs */
-	kib_pages_t	 *ibc_rx_pages;       /* premapped rx msg pages */
-
-	struct rdma_cm_id   *ibc_cmid;	   /* CM id */
-	struct ib_cq	*ibc_cq;	     /* completion queue */
-
-	kib_connvars_t      *ibc_connvars;       /* in-progress connection state */
+	struct kib_sched_info *ibc_sched;           /* scheduler information */
+	struct kib_peer       *ibc_peer;            /* owning peer */
+	kib_hca_dev_t         *ibc_hdev;            /* HCA bound on */
+	struct list_head      ibc_list;             /* stash on peer's conn
+						     * list */
+	struct list_head      ibc_sched_list;       /* schedule for attention */
+	__u16                 ibc_version;          /* version of connection */
+	__u64                 ibc_incarnation;      /* which instance of the
+						     * peer */
+	atomic_t              ibc_refcount;         /* # users */
+	int                   ibc_state;            /* what's happening */
+	int                   ibc_nsends_posted;    /* # uncompleted sends */
+	int                   ibc_noops_posted;     /* # uncompleted NOOPs */
+	int                   ibc_credits;          /* # credits I have */
+	int                   ibc_outstanding_credits; /* # credits to return */
+	int                   ibc_reserved_credits; /* # ACK/DONE msg credits */
+	int                   ibc_comms_error;      /* set on comms error */
+	unsigned int          ibc_nrx:16;           /* receive buffers owned */
+	unsigned int          ibc_scheduled:1;      /* scheduled for attention
+						     */
+	unsigned int          ibc_ready:1;          /* CQ callback fired */
+	unsigned long         ibc_last_send;        /* time of last send */
+	struct list_head      ibc_connd_list;       /* link chain for
+						     * kiblnd_check_conns only
+						     */
+	struct list_head      ibc_early_rxs;        /* rxs completed before
+						     * ESTABLISHED */
+	struct list_head      ibc_tx_noops;         /* IBLND_MSG_NOOPs for
+						     * IBLND_MSG_VERSION_1 */
+	struct list_head      ibc_tx_queue;         /* sends that need a credit
+						     */
+	struct list_head      ibc_tx_queue_nocred;  /* sends that don't need a
+						     * credit */
+	struct list_head      ibc_tx_queue_rsrvd;   /* sends that need to
+						     * reserve an ACK/DONE msg
+						     */
+	struct list_head      ibc_active_txs;       /* active tx awaiting
+						     * completion */
+	spinlock_t            ibc_lock;             /* serialise */
+	kib_rx_t              *ibc_rxs;             /* the rx descs */
+	kib_pages_t           *ibc_rx_pages;        /* premapped rx msg pages */
+
+	struct rdma_cm_id     *ibc_cmid;            /* CM id */
+	struct ib_cq          *ibc_cq;              /* completion queue */
+
+	kib_connvars_t        *ibc_connvars;        /* in-progress connection
+						     * state */
 } kib_conn_t;
 
-#define IBLND_CONN_INIT	       0	 /* being initialised */
-#define IBLND_CONN_ACTIVE_CONNECT     1	 /* active sending req */
-#define IBLND_CONN_PASSIVE_WAIT       2	 /* passive waiting for rtu */
-#define IBLND_CONN_ESTABLISHED	3	 /* connection established */
-#define IBLND_CONN_CLOSING	    4	 /* being closed */
-#define IBLND_CONN_DISCONNECTED       5	 /* disconnected */
+#define IBLND_CONN_INIT           0	 /* being initialised */
+#define IBLND_CONN_ACTIVE_CONNECT 1	 /* active sending req */
+#define IBLND_CONN_PASSIVE_WAIT   2	 /* passive waiting for rtu */
+#define IBLND_CONN_ESTABLISHED    3	 /* connection established */
+#define IBLND_CONN_CLOSING        4	 /* being closed */
+#define IBLND_CONN_DISCONNECTED   5	 /* disconnected */
 
 typedef struct kib_peer {
-	struct list_head	   ibp_list;	   /* stash on global peer list */
-	lnet_nid_t	   ibp_nid;	    /* who's on the other end(s) */
-	lnet_ni_t	   *ibp_ni;	     /* LNet interface */
-	atomic_t	 ibp_refcount;       /* # users */
-	struct list_head	   ibp_conns;	  /* all active connections */
-	struct list_head	   ibp_tx_queue;       /* msgs waiting for a conn */
-	__u16		ibp_version;	/* version of peer */
-	__u64		ibp_incarnation;    /* incarnation of peer */
-	int		  ibp_connecting;     /* current active connection attempts */
-	int		  ibp_accepting;      /* current passive connection attempts */
-	int		  ibp_error;	  /* errno on closing this peer */
-	unsigned long	   ibp_last_alive;     /* when (in jiffies) I was last alive */
+	struct list_head ibp_list;        /* stash on global peer list */
+	lnet_nid_t       ibp_nid;         /* who's on the other end(s) */
+	lnet_ni_t        *ibp_ni;         /* LNet interface */
+	atomic_t         ibp_refcount;    /* # users */
+	struct list_head ibp_conns;       /* all active connections */
+	struct list_head ibp_tx_queue;    /* msgs waiting for a conn */
+	__u16            ibp_version;     /* version of peer */
+	__u64            ibp_incarnation; /* incarnation of peer */
+	int              ibp_connecting;  /* current active connection attempts
+					   */
+	int              ibp_accepting;   /* current passive connection attempts
+					   */
+	int              ibp_error;       /* errno on closing this peer */
+	unsigned long    ibp_last_alive;  /* when (in jiffies) I was last alive
+					   */
 } kib_peer_t;
 
-extern kib_data_t      kiblnd_data;
+extern kib_data_t kiblnd_data;
 
 extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
 
@@ -941,8 +959,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
  * right because OFED1.2 defines it as const, to use it we have to add
  * (void *) cast to overcome "const" */
 
-#define KIBLND_CONN_PARAM(e)	    ((e)->param.conn.private_data)
-#define KIBLND_CONN_PARAM_LEN(e)	((e)->param.conn.private_data_len)
+#define KIBLND_CONN_PARAM(e)     ((e)->param.conn.private_data)
+#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
 
 
 struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index dbf3749..477aa8b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -44,9 +44,9 @@ static void
 kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
 {
 	lnet_msg_t *lntmsg[2];
-	kib_net_t  *net = ni->ni_data;
-	int	 rc;
-	int	 i;
+	kib_net_t *net = ni->ni_data;
+	int rc;
+	int i;
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
@@ -102,10 +102,10 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
 static kib_tx_t *
 kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 {
-	kib_net_t		*net = (kib_net_t *)ni->ni_data;
-	struct list_head		*node;
-	kib_tx_t		*tx;
-	kib_tx_poolset_t	*tps;
+	kib_net_t *net = (kib_net_t *)ni->ni_data;
+	struct list_head *node;
+	kib_tx_t *tx;
+	kib_tx_poolset_t *tps;
 
 	tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
 	node = kiblnd_pool_alloc_node(&tps->tps_poolset);
@@ -130,9 +130,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 static void
 kiblnd_drop_rx(kib_rx_t *rx)
 {
-	kib_conn_t		*conn	= rx->rx_conn;
-	struct kib_sched_info	*sched	= conn->ibc_sched;
-	unsigned long		flags;
+	kib_conn_t *conn = rx->rx_conn;
+	struct kib_sched_info *sched = conn->ibc_sched;
+	unsigned long flags;
 
 	spin_lock_irqsave(&sched->ibs_lock, flags);
 	LASSERT(conn->ibc_nrx > 0);
@@ -145,11 +145,11 @@ kiblnd_drop_rx(kib_rx_t *rx)
 int
 kiblnd_post_rx(kib_rx_t *rx, int credit)
 {
-	kib_conn_t	 *conn = rx->rx_conn;
-	kib_net_t	  *net = conn->ibc_peer->ibp_ni->ni_data;
-	struct ib_recv_wr  *bad_wrq = NULL;
-	struct ib_mr       *mr;
-	int		 rc;
+	kib_conn_t *conn = rx->rx_conn;
+	kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+	struct ib_recv_wr *bad_wrq = NULL;
+	struct ib_mr *mr;
+	int rc;
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
@@ -164,10 +164,10 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
 	rx->rx_sge.addr   = rx->rx_msgaddr;
 	rx->rx_sge.length = IBLND_MSG_SIZE;
 
-	rx->rx_wrq.next = NULL;
+	rx->rx_wrq.next    = NULL;
 	rx->rx_wrq.sg_list = &rx->rx_sge;
 	rx->rx_wrq.num_sge = 1;
-	rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
+	rx->rx_wrq.wr_id   = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
 
 	LASSERT(conn->ibc_state >= IBLND_CONN_INIT);
 	LASSERT(rx->rx_nob >= 0);	      /* not posted */
@@ -212,7 +212,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
 static kib_tx_t *
 kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 {
-	struct list_head   *tmp;
+	struct list_head *tmp;
 
 	list_for_each(tmp, &conn->ibc_active_txs) {
 		kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
@@ -237,9 +237,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 static void
 kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 {
-	kib_tx_t    *tx;
-	lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
-	int	  idle;
+	kib_tx_t *tx;
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	int idle;
 
 	spin_lock(&conn->ibc_lock);
 
@@ -276,8 +276,8 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 static void
 kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
 {
-	lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
-	kib_tx_t    *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
 
 	if (tx == NULL) {
 		CERROR("Can't get tx for completion %x for %s\n",
@@ -295,14 +295,14 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
 static void
 kiblnd_handle_rx(kib_rx_t *rx)
 {
-	kib_msg_t    *msg = rx->rx_msg;
-	kib_conn_t   *conn = rx->rx_conn;
-	lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
-	int	   credits = msg->ibm_credits;
-	kib_tx_t     *tx;
-	int	   rc = 0;
-	int	   rc2;
-	int	   post_credit;
+	kib_msg_t *msg = rx->rx_msg;
+	kib_conn_t *conn = rx->rx_conn;
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	int credits = msg->ibm_credits;
+	kib_tx_t *tx;
+	int rc = 0;
+	int rc2;
+	int post_credit;
 
 	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -456,12 +456,12 @@ kiblnd_handle_rx(kib_rx_t *rx)
 static void
 kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
 {
-	kib_msg_t    *msg = rx->rx_msg;
-	kib_conn_t   *conn = rx->rx_conn;
-	lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
-	kib_net_t    *net = ni->ni_data;
-	int	   rc;
-	int	   err = -EIO;
+	kib_msg_t *msg = rx->rx_msg;
+	kib_conn_t *conn = rx->rx_conn;
+	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+	kib_net_t *net = ni->ni_data;
+	int rc;
+	int err = -EIO;
 
 	LASSERT(net != NULL);
 	LASSERT(rx->rx_nob < 0);	       /* was posted */
@@ -502,8 +502,8 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
 	/* racing with connection establishment/teardown! */
 
 	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-		rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
-		unsigned long  flags;
+		rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+		unsigned long flags;
 
 		write_lock_irqsave(g_lock, flags);
 		/* must check holding global lock to eliminate race */
@@ -550,19 +550,19 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
 static int
 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
-	kib_hca_dev_t		*hdev;
-	__u64			*pages = tx->tx_pages;
-	kib_fmr_poolset_t	*fps;
-	int			npages;
-	int			size;
-	int			cpt;
-	int			rc;
-	int			i;
+	kib_hca_dev_t *hdev;
+	__u64 *pages = tx->tx_pages;
+	kib_fmr_poolset_t *fps;
+	int npages;
+	int size;
+	int cpt;
+	int rc;
+	int i;
 
 	LASSERT(tx->tx_pool != NULL);
 	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
 
-	hdev  = tx->tx_pool->tpo_hdev;
+	hdev = tx->tx_pool->tpo_hdev;
 
 	for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
 		for (size = 0; size <  rd->rd_frags[i].rf_nob;
@@ -586,7 +586,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 	rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
 					 tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
 	rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
-	rd->rd_frags[0].rf_nob   = nob;
+	rd->rd_frags[0].rf_nob = nob;
 	rd->rd_nfrags = 1;
 
 	return 0;
@@ -595,11 +595,11 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 static int
 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
-	kib_hca_dev_t		*hdev;
-	kib_pmr_poolset_t	*pps;
-	__u64			iova;
-	int			cpt;
-	int			rc;
+	kib_hca_dev_t *hdev;
+	kib_pmr_poolset_t *pps;
+	__u64 iova;
+	int cpt;
+	int rc;
 
 	LASSERT(tx->tx_pool != NULL);
 	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
@@ -623,7 +623,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 					 tx->tx_u.pmr->pmr_mr->lkey;
 	rd->rd_nfrags = 1;
 	rd->rd_frags[0].rf_addr = iova;
-	rd->rd_frags[0].rf_nob  = nob;
+	rd->rd_frags[0].rf_nob = nob;
 
 	return 0;
 }
@@ -631,7 +631,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 void
 kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
 {
-	kib_net_t  *net = ni->ni_data;
+	kib_net_t *net = ni->ni_data;
 
 	LASSERT(net != NULL);
 
@@ -655,20 +655,19 @@ int
 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
 	      kib_rdma_desc_t *rd, int nfrags)
 {
-	kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
-	kib_net_t	  *net   = ni->ni_data;
-	struct ib_mr       *mr    = NULL;
-	__u32	       nob;
-	int		 i;
+	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+	kib_net_t *net = ni->ni_data;
+	struct ib_mr *mr    = NULL;
+	__u32 nob;
+	int i;
 
 	/* If rd is not tx_rd, it's going to get sent to a peer and I'm the
 	 * RDMA sink */
 	tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	tx->tx_nfrags = nfrags;
 
-	rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev,
-				  tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+	rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
+					  tx->tx_nfrags, tx->tx_dmadir);
 
 	for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
 		rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
@@ -699,12 +698,12 @@ static int
 kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 		    unsigned int niov, struct kvec *iov, int offset, int nob)
 {
-	kib_net_t	  *net = ni->ni_data;
-	struct page	*page;
+	kib_net_t *net = ni->ni_data;
+	struct page *page;
 	struct scatterlist *sg;
-	unsigned long       vaddr;
-	int		 fragnob;
-	int		 page_offset;
+	unsigned long vaddr;
+	int fragnob;
+	int page_offset;
 
 	LASSERT(nob > 0);
 	LASSERT(niov > 0);
@@ -752,9 +751,9 @@ static int
 kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 		      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
 {
-	kib_net_t	  *net = ni->ni_data;
+	kib_net_t *net = ni->ni_data;
 	struct scatterlist *sg;
-	int		 fragnob;
+	int fragnob;
 
 	CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
 
@@ -793,11 +792,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 	__releases(conn->ibc_lock)
 	__acquires(conn->ibc_lock)
 {
-	kib_msg_t	 *msg = tx->tx_msg;
-	kib_peer_t	*peer = conn->ibc_peer;
-	int		ver = conn->ibc_version;
-	int		rc;
-	int		done;
+	kib_msg_t *msg = tx->tx_msg;
+	kib_peer_t *peer = conn->ibc_peer;
+	int ver = conn->ibc_version;
+	int rc;
+	int done;
 	struct ib_send_wr *bad_wrq;
 
 	LASSERT(tx->tx_queued);
@@ -878,8 +877,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 		/* close_conn will launch failover */
 		rc = -ENETDOWN;
 	} else {
-		rc = ib_post_send(conn->ibc_cmid->qp,
-				  tx->tx_wrq, &bad_wrq);
+		rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq);
 	}
 
 	conn->ibc_last_send = jiffies;
@@ -925,9 +923,9 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 void
 kiblnd_check_sends(kib_conn_t *conn)
 {
-	int	ver = conn->ibc_version;
+	int ver = conn->ibc_version;
 	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
-	kib_tx_t  *tx;
+	kib_tx_t *tx;
 
 	/* Don't send anything until after the connection is established */
 	if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -997,9 +995,9 @@ kiblnd_check_sends(kib_conn_t *conn)
 static void
 kiblnd_tx_complete(kib_tx_t *tx, int status)
 {
-	int	   failed = (status != IB_WC_SUCCESS);
-	kib_conn_t   *conn = tx->tx_conn;
-	int	   idle;
+	int failed = (status != IB_WC_SUCCESS);
+	kib_conn_t *conn = tx->tx_conn;
+	int idle;
 
 	LASSERT(tx->tx_sending > 0);
 
@@ -1051,11 +1049,11 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
 void
 kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 {
-	kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
-	struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
+	kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+	struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
 	struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
-	int		nob = offsetof(kib_msg_t, ibm_u) + body_nob;
-	struct ib_mr      *mr;
+	int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+	struct ib_mr *mr;
 
 	LASSERT(tx->tx_nwrq >= 0);
 	LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
@@ -1086,14 +1084,14 @@ int
 kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
 {
-	kib_msg_t	 *ibmsg = tx->tx_msg;
-	kib_rdma_desc_t   *srcrd = tx->tx_rd;
-	struct ib_sge     *sge = &tx->tx_sge[0];
+	kib_msg_t *ibmsg = tx->tx_msg;
+	kib_rdma_desc_t *srcrd = tx->tx_rd;
+	struct ib_sge *sge = &tx->tx_sge[0];
 	struct ib_send_wr *wrq = &tx->tx_wrq[0];
-	int		rc  = resid;
-	int		srcidx;
-	int		dstidx;
-	int		wrknob;
+	int rc  = resid;
+	int srcidx;
+	int dstidx;
+	int wrknob;
 
 	LASSERT(!in_interrupt());
 	LASSERT(tx->tx_nwrq == 0);
@@ -1144,7 +1142,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		wrq->send_flags = 0;
 
 		wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
-		wrq->wr.rdma.rkey	= kiblnd_rd_frag_key(dstrd, dstidx);
+		wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);
 
 		srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
 		dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
@@ -1170,7 +1168,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 void
 kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
 {
-	struct list_head   *q;
+	struct list_head *q;
 
 	LASSERT(tx->tx_nwrq > 0);	      /* work items set up */
 	LASSERT(!tx->tx_queued);	       /* not queued for sending already */
@@ -1271,11 +1269,11 @@ static void
 kiblnd_connect_peer(kib_peer_t *peer)
 {
 	struct rdma_cm_id *cmid;
-	kib_dev_t	 *dev;
-	kib_net_t	 *net = peer->ibp_ni->ni_data;
+	kib_dev_t *dev;
+	kib_net_t *net = peer->ibp_ni->ni_data;
 	struct sockaddr_in srcaddr;
 	struct sockaddr_in dstaddr;
-	int		rc;
+	int rc;
 
 	LASSERT(net != NULL);
 	LASSERT(peer->ibp_connecting > 0);
@@ -1335,12 +1333,12 @@ kiblnd_connect_peer(kib_peer_t *peer)
 void
 kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 {
-	kib_peer_t	*peer;
-	kib_peer_t	*peer2;
-	kib_conn_t	*conn;
-	rwlock_t	*g_lock = &kiblnd_data.kib_global_lock;
-	unsigned long      flags;
-	int		rc;
+	kib_peer_t *peer;
+	kib_peer_t *peer2;
+	kib_conn_t *conn;
+	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+	unsigned long flags;
+	int rc;
 
 	/* If I get here, I've committed to send, so I complete the tx with
 	 * failure on any problems */
@@ -1456,20 +1454,20 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 int
 kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 {
-	lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
-	int	       type = lntmsg->msg_type;
+	lnet_hdr_t *hdr = &lntmsg->msg_hdr;
+	int type = lntmsg->msg_type;
 	lnet_process_id_t target = lntmsg->msg_target;
-	int	       target_is_router = lntmsg->msg_target_is_router;
-	int	       routing = lntmsg->msg_routing;
-	unsigned int      payload_niov = lntmsg->msg_niov;
-	struct kvec      *payload_iov = lntmsg->msg_iov;
-	lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
-	unsigned int      payload_offset = lntmsg->msg_offset;
-	unsigned int      payload_nob = lntmsg->msg_len;
-	kib_msg_t	*ibmsg;
-	kib_tx_t	 *tx;
-	int	       nob;
-	int	       rc;
+	int target_is_router = lntmsg->msg_target_is_router;
+	int routing = lntmsg->msg_routing;
+	unsigned int payload_niov = lntmsg->msg_niov;
+	struct kvec *payload_iov = lntmsg->msg_iov;
+	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+	unsigned int payload_offset = lntmsg->msg_offset;
+	unsigned int payload_nob = lntmsg->msg_len;
+	kib_msg_t *ibmsg;
+	kib_tx_t *tx;
+	int nob;
+	int rc;
 
 	/* NB 'private' is different depending on what we're sending.... */
 
@@ -1628,13 +1626,13 @@ static void
 kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
 {
 	lnet_process_id_t target = lntmsg->msg_target;
-	unsigned int      niov = lntmsg->msg_niov;
-	struct kvec      *iov = lntmsg->msg_iov;
-	lnet_kiov_t      *kiov = lntmsg->msg_kiov;
-	unsigned int      offset = lntmsg->msg_offset;
-	unsigned int      nob = lntmsg->msg_len;
-	kib_tx_t	 *tx;
-	int	       rc;
+	unsigned int niov = lntmsg->msg_niov;
+	struct kvec *iov = lntmsg->msg_iov;
+	lnet_kiov_t *kiov = lntmsg->msg_kiov;
+	unsigned int offset = lntmsg->msg_offset;
+	unsigned int nob = lntmsg->msg_len;
+	kib_tx_t *tx;
+	int rc;
 
 	tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
 	if (tx == NULL) {
@@ -1691,14 +1689,14 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 	     unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
 	     unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-	kib_rx_t    *rx = private;
-	kib_msg_t   *rxmsg = rx->rx_msg;
-	kib_conn_t  *conn = rx->rx_conn;
-	kib_tx_t    *tx;
-	kib_msg_t   *txmsg;
-	int	  nob;
-	int	  post_credit = IBLND_POSTRX_PEER_CREDIT;
-	int	  rc = 0;
+	kib_rx_t *rx = private;
+	kib_msg_t *rxmsg = rx->rx_msg;
+	kib_conn_t *conn = rx->rx_conn;
+	kib_tx_t *tx;
+	kib_msg_t *txmsg;
+	int nob;
+	int post_credit = IBLND_POSTRX_PEER_CREDIT;
+	int rc = 0;
 
 	LASSERT(mlen <= rlen);
 	LASSERT(!in_interrupt());
@@ -1828,8 +1826,8 @@ kiblnd_peer_alive(kib_peer_t *peer)
 static void
 kiblnd_peer_notify(kib_peer_t *peer)
 {
-	int	   error = 0;
-	unsigned long    last_alive = 0;
+	int error = 0;
+	unsigned long last_alive = 0;
 	unsigned long flags;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -1860,9 +1858,9 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
 	 * connection to be finished off by the connd.  Otherwise the connd is
 	 * already dealing with it (either to set it up or tear it down).
 	 * Caller holds kib_global_lock exclusively in irq context */
-	kib_peer_t       *peer = conn->ibc_peer;
-	kib_dev_t	*dev;
-	unsigned long     flags;
+	kib_peer_t *peer = conn->ibc_peer;
+	kib_dev_t *dev;
+	unsigned long flags;
 
 	LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -1934,8 +1932,8 @@ kiblnd_close_conn(kib_conn_t *conn, int error)
 static void
 kiblnd_handle_early_rxs(kib_conn_t *conn)
 {
-	unsigned long    flags;
-	kib_rx_t	*rx;
+	unsigned long flags;
+	kib_rx_t *rx;
 	kib_rx_t *tmp;
 
 	LASSERT(!in_interrupt());
@@ -1957,9 +1955,9 @@ static void
 kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
 {
 	LIST_HEAD(zombies);
-	struct list_head	  *tmp;
-	struct list_head	  *nxt;
-	kib_tx_t	    *tx;
+	struct list_head *tmp;
+	struct list_head *nxt;
+	kib_tx_t *tx;
 
 	spin_lock(&conn->ibc_lock);
 
@@ -2018,7 +2016,7 @@ void
 kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
 {
 	LIST_HEAD(zombies);
-	unsigned long     flags;
+	unsigned long flags;
 
 	LASSERT(error != 0);
 	LASSERT(!in_interrupt());
@@ -2071,12 +2069,12 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
 void
 kiblnd_connreq_done(kib_conn_t *conn, int status)
 {
-	kib_peer_t	*peer = conn->ibc_peer;
-	kib_tx_t	  *tx;
+	kib_peer_t *peer = conn->ibc_peer;
+	kib_tx_t *tx;
 	kib_tx_t *tmp;
-	struct list_head	 txs;
-	unsigned long      flags;
-	int		active;
+	struct list_head txs;
+	unsigned long flags;
+	int active;
 
 	active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 
@@ -2166,7 +2164,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
 static void
 kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 {
-	int	  rc;
+	int rc;
 
 	rc = rdma_reject(cmid, rej, sizeof(*rej));
 
@@ -2177,22 +2175,22 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 static int
 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
-	rwlock_t		*g_lock = &kiblnd_data.kib_global_lock;
-	kib_msg_t	     *reqmsg = priv;
-	kib_msg_t	     *ackmsg;
-	kib_dev_t	     *ibdev;
-	kib_peer_t	    *peer;
-	kib_peer_t	    *peer2;
-	kib_conn_t	    *conn;
-	lnet_ni_t	     *ni  = NULL;
-	kib_net_t	     *net = NULL;
-	lnet_nid_t	     nid;
+	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+	kib_msg_t *reqmsg = priv;
+	kib_msg_t *ackmsg;
+	kib_dev_t *ibdev;
+	kib_peer_t *peer;
+	kib_peer_t *peer2;
+	kib_conn_t *conn;
+	lnet_ni_t *ni  = NULL;
+	kib_net_t *net = NULL;
+	lnet_nid_t nid;
 	struct rdma_conn_param cp;
-	kib_rej_t	      rej;
-	int		    version = IBLND_MSG_VERSION;
-	unsigned long	  flags;
-	int		    rc;
-	struct sockaddr_in    *peer_addr;
+	kib_rej_t rej;
+	int version = IBLND_MSG_VERSION;
+	unsigned long flags;
+	int rc;
+	struct sockaddr_in *peer_addr;
 	LASSERT(!in_interrupt());
 
 	/* cmid inherits 'context' from the corresponding listener id */
@@ -2200,8 +2198,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	LASSERT(ibdev != NULL);
 
 	memset(&rej, 0, sizeof(rej));
-	rej.ibr_magic		= IBLND_MSG_MAGIC;
-	rej.ibr_why		  = IBLND_REJECT_FATAL;
+	rej.ibr_magic = IBLND_MSG_MAGIC;
+	rej.ibr_why = IBLND_REJECT_FATAL;
 	rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
 
 	peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
@@ -2243,7 +2241,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	}
 
 	nid = reqmsg->ibm_srcnid;
-	ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
+	ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
 
 	if (ni != NULL) {
 		net = (kib_net_t *)ni->ni_data;
@@ -2394,7 +2392,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	 * CM callback doesn't destroy cmid. */
 
 	conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
-	conn->ibc_credits	  = IBLND_MSG_QUEUE_SIZE(version);
+	conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
 	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
 		 <= IBLND_RX_MSGS(version));
@@ -2412,12 +2410,12 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
 	memset(&cp, 0, sizeof(cp));
 	cp.private_data	= ackmsg;
-	cp.private_data_len    = ackmsg->ibm_nob;
+	cp.private_data_len = ackmsg->ibm_nob;
 	cp.responder_resources = 0;	     /* No atomic ops or RDMA reads */
-	cp.initiator_depth     = 0;
+	cp.initiator_depth = 0;
 	cp.flow_control	= 1;
-	cp.retry_count	 = *kiblnd_tunables.kib_retry_count;
-	cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
+	cp.retry_count = *kiblnd_tunables.kib_retry_count;
+	cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
 
 	CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
 
@@ -2439,7 +2437,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	if (ni != NULL)
 		lnet_ni_decref(ni);
 
-	rej.ibr_version = version;
+	rej.ibr_version             = version;
 	rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
 	rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
 	kiblnd_reject(cmid, &rej);
@@ -2451,10 +2449,10 @@ static void
 kiblnd_reconnect(kib_conn_t *conn, int version,
 		  __u64 incarnation, int why, kib_connparams_t *cp)
 {
-	kib_peer_t    *peer = conn->ibc_peer;
-	char	  *reason;
-	int	    retry = 0;
-	unsigned long  flags;
+	kib_peer_t *peer = conn->ibc_peer;
+	char *reason;
+	int retry = 0;
+	unsigned long flags;
 
 	LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 	LASSERT(peer->ibp_connecting > 0);     /* 'conn' at least */
@@ -2513,7 +2511,7 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
 static void
 kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 {
-	kib_peer_t    *peer = conn->ibc_peer;
+	kib_peer_t *peer = conn->ibc_peer;
 
 	LASSERT(!in_interrupt());
 	LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
@@ -2532,10 +2530,10 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 
 	case IB_CM_REJ_CONSUMER_DEFINED:
 		if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
-			kib_rej_t	*rej	 = priv;
-			kib_connparams_t *cp	  = NULL;
-			int	       flip	= 0;
-			__u64	     incarnation = -1;
+			kib_rej_t *rej = priv;
+			kib_connparams_t *cp = NULL;
+			int flip = 0;
+			__u64 incarnation = -1;
 
 			/* NB. default incarnation is -1 because:
 			 * a) V1 will ignore dst incarnation in connreq.
@@ -2652,13 +2650,13 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 static void
 kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 {
-	kib_peer_t    *peer = conn->ibc_peer;
-	lnet_ni_t     *ni   = peer->ibp_ni;
-	kib_net_t     *net  = ni->ni_data;
-	kib_msg_t     *msg  = priv;
-	int	    ver  = conn->ibc_version;
-	int	    rc   = kiblnd_unpack_msg(msg, priv_nob);
-	unsigned long  flags;
+	kib_peer_t *peer = conn->ibc_peer;
+	lnet_ni_t *ni = peer->ibp_ni;
+	kib_net_t *net = ni->ni_data;
+	kib_msg_t *msg = priv;
+	int ver = conn->ibc_version;
+	int rc = kiblnd_unpack_msg(msg, priv_nob);
+	unsigned long flags;
 
 	LASSERT(net != NULL);
 
@@ -2726,8 +2724,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 		goto failed;
 	}
 
-	conn->ibc_incarnation      = msg->ibm_srcstamp;
-	conn->ibc_credits	   = IBLND_MSG_QUEUE_SIZE(ver);
+	conn->ibc_incarnation = msg->ibm_srcstamp;
+	conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(ver);
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
 	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
 		 <= IBLND_RX_MSGS(ver));
@@ -2749,20 +2747,20 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 static int
 kiblnd_active_connect(struct rdma_cm_id *cmid)
 {
-	kib_peer_t	      *peer = (kib_peer_t *)cmid->context;
-	kib_conn_t	      *conn;
-	kib_msg_t	       *msg;
-	struct rdma_conn_param   cp;
-	int		      version;
-	__u64		    incarnation;
-	unsigned long	    flags;
-	int		      rc;
+	kib_peer_t *peer = (kib_peer_t *)cmid->context;
+	kib_conn_t *conn;
+	kib_msg_t *msg;
+	struct rdma_conn_param cp;
+	int version;
+	__u64 incarnation;
+	unsigned long flags;
+	int rc;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
 	incarnation = peer->ibp_incarnation;
-	version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
-						 peer->ibp_version;
+	version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
+					     peer->ibp_version;
 
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
@@ -2793,8 +2791,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 	cp.private_data_len    = msg->ibm_nob;
 	cp.responder_resources = 0;	     /* No atomic ops or RDMA reads */
 	cp.initiator_depth     = 0;
-	cp.flow_control	= 1;
-	cp.retry_count	 = *kiblnd_tunables.kib_retry_count;
+	cp.flow_control        = 1;
+	cp.retry_count         = *kiblnd_tunables.kib_retry_count;
 	cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
 
 	LASSERT(cmid->context == (void *)conn);
@@ -2814,9 +2812,9 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 int
 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 {
-	kib_peer_t  *peer;
-	kib_conn_t  *conn;
-	int	  rc;
+	kib_peer_t *peer;
+	kib_conn_t *conn;
+	int rc;
 
 	switch (event->event) {
 	default:
@@ -2983,8 +2981,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 static int
 kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
 {
-	kib_tx_t	  *tx;
-	struct list_head	*ttmp;
+	kib_tx_t *tx;
+	struct list_head *ttmp;
 
 	list_for_each(ttmp, txs) {
 		tx = list_entry(ttmp, kib_tx_t, tx_list);
@@ -3022,13 +3020,13 @@ kiblnd_check_conns(int idx)
 {
 	LIST_HEAD(closes);
 	LIST_HEAD(checksends);
-	struct list_head    *peers = &kiblnd_data.kib_peers[idx];
-	struct list_head    *ptmp;
-	kib_peer_t    *peer;
-	kib_conn_t    *conn;
+	struct list_head *peers = &kiblnd_data.kib_peers[idx];
+	struct list_head *ptmp;
+	kib_peer_t *peer;
+	kib_conn_t *conn;
 	kib_conn_t *tmp;
-	struct list_head    *ctmp;
-	unsigned long  flags;
+	struct list_head *ctmp;
+	unsigned long flags;
 
 	/* NB. We expect to have a look at all the peers and not find any
 	 * RDMAs to time out, so we just use a shared lock while we
@@ -3114,14 +3112,14 @@ kiblnd_disconnect_conn(kib_conn_t *conn)
 int
 kiblnd_connd(void *arg)
 {
-	wait_queue_t     wait;
-	unsigned long      flags;
-	kib_conn_t	*conn;
-	int		timeout;
-	int		i;
-	int		dropped_lock;
-	int		peer_index = 0;
-	unsigned long      deadline = jiffies;
+	wait_queue_t wait;
+	unsigned long flags;
+	kib_conn_t *conn;
+	int timeout;
+	int i;
+	int dropped_lock;
+	int peer_index = 0;
+	unsigned long deadline = jiffies;
 
 	cfs_block_allsigs();
 
@@ -3169,7 +3167,7 @@ kiblnd_connd(void *arg)
 		if (timeout <= 0) {
 			const int n = 4;
 			const int p = 1;
-			int       chunk = kiblnd_data.kib_peer_hash_size;
+			int chunk = kiblnd_data.kib_peer_hash_size;
 
 			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 			dropped_lock = 1;
@@ -3273,9 +3271,9 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
 	 * consuming my CQ I could be called after all completions have
 	 * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
 	 * and this CQ is about to be destroyed so I NOOP. */
-	kib_conn_t		*conn = (kib_conn_t *)arg;
-	struct kib_sched_info	*sched = conn->ibc_sched;
-	unsigned long		flags;
+	kib_conn_t *conn = (kib_conn_t *)arg;
+	struct kib_sched_info *sched = conn->ibc_sched;
+	unsigned long flags;
 
 	LASSERT(cq == conn->ibc_cq);
 
@@ -3309,15 +3307,15 @@ kiblnd_cq_event(struct ib_event *event, void *arg)
 int
 kiblnd_scheduler(void *arg)
 {
-	long			id = (long)arg;
-	struct kib_sched_info	*sched;
-	kib_conn_t		*conn;
-	wait_queue_t		wait;
-	unsigned long		flags;
-	struct ib_wc		wc;
-	int			did_something;
-	int			busy_loops = 0;
-	int			rc;
+	long id = (long)arg;
+	struct kib_sched_info *sched;
+	kib_conn_t *conn;
+	wait_queue_t wait;
+	unsigned long flags;
+	struct ib_wc wc;
+	int did_something;
+	int busy_loops = 0;
+	int rc;
 
 	cfs_block_allsigs();
 
@@ -3432,11 +3430,11 @@ kiblnd_scheduler(void *arg)
 int
 kiblnd_failover_thread(void *arg)
 {
-	rwlock_t		*glock = &kiblnd_data.kib_global_lock;
-	kib_dev_t	 *dev;
-	wait_queue_t     wait;
-	unsigned long      flags;
-	int		rc;
+	rwlock_t *glock = &kiblnd_data.kib_global_lock;
+	kib_dev_t *dev;
+	wait_queue_t wait;
+	unsigned long flags;
+	int rc;
 
 	LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
 
@@ -3446,8 +3444,8 @@ kiblnd_failover_thread(void *arg)
 	write_lock_irqsave(glock, flags);
 
 	while (!kiblnd_data.kib_shutdown) {
-		int     do_failover = 0;
-		int     long_sleep;
+		int do_failover = 0;
+		int long_sleep;
 
 		list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
 				    ibd_fail_list) {
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index eedf01a..b0e0036 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -150,30 +150,30 @@ module_param(use_privileged_port, int, 0644);
 MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
 
 kib_tunables_t kiblnd_tunables = {
-	.kib_dev_failover	   = &dev_failover,
-	.kib_service		= &service,
-	.kib_cksum		  = &cksum,
-	.kib_timeout		= &timeout,
-	.kib_keepalive	      = &keepalive,
-	.kib_ntx		    = &ntx,
-	.kib_credits		= &credits,
-	.kib_peertxcredits	  = &peer_credits,
-	.kib_peercredits_hiw	= &peer_credits_hiw,
-	.kib_peerrtrcredits	 = &peer_buffer_credits,
-	.kib_peertimeout	    = &peer_timeout,
-	.kib_default_ipif	   = &ipif_name,
-	.kib_retry_count	    = &retry_count,
-	.kib_rnr_retry_count	= &rnr_retry_count,
-	.kib_concurrent_sends       = &concurrent_sends,
-	.kib_ib_mtu		 = &ib_mtu,
-	.kib_map_on_demand	  = &map_on_demand,
-	.kib_fmr_pool_size	  = &fmr_pool_size,
-	.kib_fmr_flush_trigger      = &fmr_flush_trigger,
-	.kib_fmr_cache	      = &fmr_cache,
-	.kib_pmr_pool_size	  = &pmr_pool_size,
-	.kib_require_priv_port      = &require_privileged_port,
-	.kib_use_priv_port	    = &use_privileged_port,
-	.kib_nscheds		    = &nscheds
+	.kib_dev_failover      = &dev_failover,
+	.kib_service           = &service,
+	.kib_cksum             = &cksum,
+	.kib_timeout           = &timeout,
+	.kib_keepalive         = &keepalive,
+	.kib_ntx               = &ntx,
+	.kib_credits           = &credits,
+	.kib_peertxcredits     = &peer_credits,
+	.kib_peercredits_hiw   = &peer_credits_hiw,
+	.kib_peerrtrcredits    = &peer_buffer_credits,
+	.kib_peertimeout       = &peer_timeout,
+	.kib_default_ipif      = &ipif_name,
+	.kib_retry_count       = &retry_count,
+	.kib_rnr_retry_count   = &rnr_retry_count,
+	.kib_concurrent_sends  = &concurrent_sends,
+	.kib_ib_mtu            = &ib_mtu,
+	.kib_map_on_demand     = &map_on_demand,
+	.kib_fmr_pool_size     = &fmr_pool_size,
+	.kib_fmr_flush_trigger = &fmr_flush_trigger,
+	.kib_fmr_cache         = &fmr_cache,
+	.kib_pmr_pool_size     = &pmr_pool_size,
+	.kib_require_priv_port = &require_privileged_port,
+	.kib_use_priv_port     = &use_privileged_port,
+	.kib_nscheds           = &nscheds
 };
 
 int
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (15 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue Mike Shuey
                   ` (5 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

LNET_MUTEX_LOCK and LNET_MUTEX_UNLOCK are verbose wrappers to mutex_lock and
mutex_unlock.  Get rid of these.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/include/linux/lnet/lib-lnet.h   |    2 -
 drivers/staging/lustre/lnet/lnet/api-ni.c          |   26 ++++++++++----------
 drivers/staging/lustre/lnet/lnet/module.c          |   12 ++++----
 3 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 0038d29..d84aa9a 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -172,8 +172,6 @@ lnet_net_lock_current(void)
 #define lnet_eq_wait_unlock()	spin_unlock(&the_lnet.ln_eq_wait_lock)
 #define lnet_ni_lock(ni)	spin_lock(&(ni)->ni_lock)
 #define lnet_ni_unlock(ni)	spin_unlock(&(ni)->ni_lock)
-#define LNET_MUTEX_LOCK(m)	mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m)	mutex_unlock(m)
 
 #define MAX_PORTALS     64
 
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 1adc481..2230eb0 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -282,7 +282,7 @@ lnet_find_lnd_by_type(int type)
 void
 lnet_register_lnd(lnd_t *lnd)
 {
-	LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+	mutex_lock(&the_lnet.ln_lnd_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
@@ -293,14 +293,14 @@ lnet_register_lnd(lnd_t *lnd)
 
 	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
 
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+	mutex_unlock(&the_lnet.ln_lnd_mutex);
 }
 EXPORT_SYMBOL(lnet_register_lnd);
 
 void
 lnet_unregister_lnd(lnd_t *lnd)
 {
-	LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+	mutex_lock(&the_lnet.ln_lnd_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
@@ -309,7 +309,7 @@ lnet_unregister_lnd(lnd_t *lnd)
 	list_del(&lnd->lnd_list);
 	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
 
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+	mutex_unlock(&the_lnet.ln_lnd_mutex);
 }
 EXPORT_SYMBOL(lnet_unregister_lnd);
 
@@ -1055,18 +1055,18 @@ lnet_startup_lndnis(void)
 			goto failed;
 		}
 
-		LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+		mutex_lock(&the_lnet.ln_lnd_mutex);
 		lnd = lnet_find_lnd_by_type(lnd_type);
 
 		if (lnd == NULL) {
-			LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+			mutex_unlock(&the_lnet.ln_lnd_mutex);
 			rc = request_module("%s",
 						libcfs_lnd2modname(lnd_type));
-			LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+			mutex_lock(&the_lnet.ln_lnd_mutex);
 
 			lnd = lnet_find_lnd_by_type(lnd_type);
 			if (lnd == NULL) {
-				LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+				mutex_unlock(&the_lnet.ln_lnd_mutex);
 				CERROR("Can't load LND %s, module %s, rc=%d\n",
 				       libcfs_lnd2str(lnd_type),
 				       libcfs_lnd2modname(lnd_type), rc);
@@ -1082,7 +1082,7 @@ lnet_startup_lndnis(void)
 
 		rc = (lnd->lnd_startup)(ni);
 
-		LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+		mutex_unlock(&the_lnet.ln_lnd_mutex);
 
 		if (rc != 0) {
 			LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
@@ -1272,7 +1272,7 @@ LNetNIInit(lnet_pid_t requested_pid)
 	int im_a_router = 0;
 	int rc;
 
-	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+	mutex_lock(&the_lnet.ln_api_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
@@ -1343,7 +1343,7 @@ LNetNIInit(lnet_pid_t requested_pid)
  failed0:
 	LASSERT(rc < 0);
  out:
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+	mutex_unlock(&the_lnet.ln_api_mutex);
 	return rc;
 }
 EXPORT_SYMBOL(LNetNIInit);
@@ -1360,7 +1360,7 @@ EXPORT_SYMBOL(LNetNIInit);
 int
 LNetNIFini(void)
 {
-	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+	mutex_lock(&the_lnet.ln_api_mutex);
 
 	LASSERT(the_lnet.ln_init);
 	LASSERT(the_lnet.ln_refcount > 0);
@@ -1383,7 +1383,7 @@ LNetNIFini(void)
 		lnet_unprepare();
 	}
 
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+	mutex_unlock(&the_lnet.ln_api_mutex);
 	return 0;
 }
 EXPORT_SYMBOL(LNetNIFini);
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index f73d644..6881b9c 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -49,7 +49,7 @@ lnet_configure(void *arg)
 	/* 'arg' only there so I can be passed to cfs_create_thread() */
 	int rc = 0;
 
-	LNET_MUTEX_LOCK(&lnet_config_mutex);
+	mutex_lock(&lnet_config_mutex);
 
 	if (!the_lnet.ln_niinit_self) {
 		rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
@@ -59,7 +59,7 @@ lnet_configure(void *arg)
 		}
 	}
 
-	LNET_MUTEX_UNLOCK(&lnet_config_mutex);
+	mutex_unlock(&lnet_config_mutex);
 	return rc;
 }
 
@@ -68,18 +68,18 @@ lnet_unconfigure(void)
 {
 	int refcount;
 
-	LNET_MUTEX_LOCK(&lnet_config_mutex);
+	mutex_lock(&lnet_config_mutex);
 
 	if (the_lnet.ln_niinit_self) {
 		the_lnet.ln_niinit_self = 0;
 		LNetNIFini();
 	}
 
-	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+	mutex_lock(&the_lnet.ln_api_mutex);
 	refcount = the_lnet.ln_refcount;
-	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+	mutex_unlock(&the_lnet.ln_api_mutex);
 
-	LNET_MUTEX_UNLOCK(&lnet_config_mutex);
+	mutex_unlock(&lnet_config_mutex);
 	return (refcount == 0) ? 0 : -EBUSY;
 }
 
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (16 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes Mike Shuey
                   ` (4 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Some debug code was left in the tree, blocked by a #if 0.  Also kicked out
checkpatch.pl warnings.  Remove it.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/lib-msg.c |   17 -----------------
 1 files changed, 0 insertions(+), 17 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 65d7595..55e6638 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -444,23 +444,6 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
 
 	if (msg == NULL)
 		return;
-#if 0
-	CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
-	       lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
-	       msg->msg_target_is_router ? "t" : "",
-	       msg->msg_routing ? "X" : "",
-	       msg->msg_ack ? "A" : "",
-	       msg->msg_sending ? "S" : "",
-	       msg->msg_receiving ? "R" : "",
-	       msg->msg_delayed ? "d" : "",
-	       msg->msg_txcredit ? "C" : "",
-	       msg->msg_peertxcredit ? "c" : "",
-	       msg->msg_rtrcredit ? "F" : "",
-	       msg->msg_peerrtrcredit ? "f" : "",
-	       msg->msg_onactivelist ? "!" : "",
-	       msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
-	       msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
-#endif
 	msg->msg_ev.status = status;
 
 	if (msg->msg_md != NULL) {
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (17 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 11/13] staging: lnet: o2iblnd: " Mike Shuey
                   ` (3 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Fix many checkpatch.pl warnings.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/lnet/acceptor.c    |   69 ++++++++++++++---------
 drivers/staging/lustre/lnet/lnet/api-ni.c      |   18 ++++--
 drivers/staging/lustre/lnet/lnet/config.c      |    2 +-
 drivers/staging/lustre/lnet/lnet/lib-eq.c      |    4 +-
 drivers/staging/lustre/lnet/lnet/lib-md.c      |    4 +-
 drivers/staging/lustre/lnet/lnet/lib-ptl.c     |    9 ++-
 drivers/staging/lustre/lnet/lnet/router.c      |   17 ++++--
 drivers/staging/lustre/lnet/lnet/router_proc.c |   16 ++++--
 8 files changed, 88 insertions(+), 51 deletions(-)

diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 69d4b19..21af335 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -99,38 +99,42 @@ lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
 	switch (rc) {
 	/* "normal" errors */
 	case -ECONNREFUSED:
-		CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
-			libcfs_nid2str(peer_nid),
-			&peer_ip, peer_port);
+		CNETERR(
+			"Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
+			libcfs_nid2str(peer_nid), &peer_ip, peer_port);
 		break;
 	case -EHOSTUNREACH:
 	case -ENETUNREACH:
-		CNETERR("Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n",
+		CNETERR(
+			"Connection to %s at host %pI4h was unreachable: the network or that node may be down, or Lustre may be misconfigured.\n",
 			libcfs_nid2str(peer_nid), &peer_ip);
 		break;
 	case -ETIMEDOUT:
-		CNETERR("Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n",
-			libcfs_nid2str(peer_nid),
-			&peer_ip, peer_port);
+		CNETERR(
+			"Connection to %s at host %pI4h on port %d took too long: that node may be hung or experiencing high load.\n",
+			libcfs_nid2str(peer_nid), &peer_ip, peer_port);
 		break;
 	case -ECONNRESET:
-		LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n",
-				   libcfs_nid2str(peer_nid),
-				   &peer_ip, peer_port,
-				   libcfs_nid2str(peer_nid));
+		LCONSOLE_ERROR_MSG(0x11b,
+				   "Connection to %s at host %pI4h on port %d was reset: is it running a compatible version of Lustre and is %s one of its NIDs?\n",
+				   libcfs_nid2str(peer_nid), &peer_ip,
+				   peer_port, libcfs_nid2str(peer_nid));
 		break;
 	case -EPROTO:
-		LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n",
-				   libcfs_nid2str(peer_nid),
-				   &peer_ip, peer_port);
+		LCONSOLE_ERROR_MSG(0x11c,
+				   "Protocol error connecting to %s at host %pI4h on port %d: is it running a compatible version of Lustre?\n",
+				   libcfs_nid2str(peer_nid), &peer_ip,
+				   peer_port);
 		break;
 	case -EADDRINUSE:
-		LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to connect to %s at host %pI4h on port %d\n",
+		LCONSOLE_ERROR_MSG(0x11d,
+				   "No privileged ports available to connect to %s at host %pI4h on port %d\n",
 				   libcfs_nid2str(peer_nid),
 				   &peer_ip, peer_port);
 		break;
 	default:
-		LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s at host %pI4h on port %d\n",
+		LCONSOLE_ERROR_MSG(0x11e,
+				   "Unexpected error %d connecting to %s at host %pI4h on port %d\n",
 				   rc, libcfs_nid2str(peer_nid),
 				   &peer_ip, peer_port);
 		break;
@@ -238,7 +242,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 					       accept_timeout);
 
 			if (rc != 0)
-				CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
+				CERROR(
+				       "Error sending magic+version in response to LNET magic from %pI4h: %d\n",
 				       &peer_ip, rc);
 			return -EPROTO;
 		}
@@ -250,7 +255,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 		else
 			str = "unrecognised";
 
-		LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
+		LCONSOLE_ERROR_MSG(0x11f,
+				   "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
 				   &peer_ip, magic, str);
 		return -EPROTO;
 	}
@@ -261,7 +267,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 			      sizeof(cr.acr_version),
 			      accept_timeout);
 	if (rc != 0) {
-		CERROR("Error %d reading connection request version from %pI4h\n",
+		CERROR(
+			"Error %d reading connection request version from %pI4h\n",
 			rc, &peer_ip);
 		return -EIO;
 	}
@@ -284,7 +291,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 				       accept_timeout);
 
 		if (rc != 0)
-			CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
+			CERROR(
+			       "Error sending magic+version in response to version %d from %pI4h: %d\n",
 			       peer_version, &peer_ip, rc);
 		return -EPROTO;
 	}
@@ -294,7 +302,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 			      offsetof(lnet_acceptor_connreq_t, acr_nid),
 			      accept_timeout);
 	if (rc != 0) {
-		CERROR("Error %d reading connection request from %pI4h\n",
+		CERROR(
+			"Error %d reading connection request from %pI4h\n",
 			rc, &peer_ip);
 		return -EIO;
 	}
@@ -307,7 +316,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 	    ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
 		if (ni != NULL)
 			lnet_ni_decref(ni);
-		LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
+		LCONSOLE_ERROR_MSG(0x120,
+				   "Refusing connection from %pI4h for %s: No matching NI\n",
 				   &peer_ip, libcfs_nid2str(cr.acr_nid));
 		return -EPERM;
 	}
@@ -315,7 +325,8 @@ lnet_accept(struct socket *sock, __u32 magic)
 	if (ni->ni_lnd->lnd_accept == NULL) {
 		/* This catches a request for the loopback LND */
 		lnet_ni_decref(ni);
-		LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n",
+		LCONSOLE_ERROR_MSG(0x121,
+				  "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n",
 				  &peer_ip, libcfs_nid2str(cr.acr_nid));
 		return -EPERM;
 	}
@@ -347,10 +358,12 @@ lnet_acceptor(void *arg)
 				0, accept_port, accept_backlog);
 	if (rc != 0) {
 		if (rc == -EADDRINUSE)
-			LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
+			LCONSOLE_ERROR_MSG(0x122,
+					   "Can't start acceptor on port %d: port already in use\n",
 					   accept_port);
 		else
-			LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port %d: unexpected error %d\n",
+			LCONSOLE_ERROR_MSG(0x123,
+					   "Can't start acceptor on port %d: unexpected error %d\n",
 					   accept_port, rc);
 
 		lnet_acceptor_state.pta_sock = NULL;
@@ -390,7 +403,8 @@ lnet_acceptor(void *arg)
 		}
 
 		if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
-			CERROR("Refusing connection from %pI4h: insecure port %d\n",
+			CERROR(
+			       "Refusing connection from %pI4h: insecure port %d\n",
 			       &peer_ip, peer_port);
 			goto failed;
 		}
@@ -398,7 +412,8 @@ lnet_acceptor(void *arg)
 		rc = libcfs_sock_read(newsock, &magic, sizeof(magic),
 				      accept_timeout);
 		if (rc != 0) {
-			CERROR("Error %d reading connection request from %pI4h\n",
+			CERROR(
+				"Error %d reading connection request from %pI4h\n",
 				rc, &peer_ip);
 			goto failed;
 		}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 2230eb0..6af8a5f 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -74,7 +74,8 @@ lnet_get_networks(void)
 	int rc;
 
 	if (*networks != 0 && *ip2nets != 0) {
-		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
+		LCONSOLE_ERROR_MSG(0x101,
+				   "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
 		return NULL;
 	}
 
@@ -189,7 +190,8 @@ static void lnet_assert_wire_constants(void)
 	/* Checks for struct ptl_handle_wire_t */
 	CLASSERT((int)sizeof(lnet_handle_wire_t) == 16);
 	CLASSERT((int)offsetof(lnet_handle_wire_t, wh_interface_cookie) == 0);
-	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) == 8);
+	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) ==
+		8);
 	CLASSERT((int)offsetof(lnet_handle_wire_t, wh_object_cookie) == 8);
 	CLASSERT((int)sizeof(((lnet_handle_wire_t *)0)->wh_object_cookie) == 8);
 
@@ -1085,7 +1087,8 @@ lnet_startup_lndnis(void)
 		mutex_unlock(&the_lnet.ln_lnd_mutex);
 
 		if (rc != 0) {
-			LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
+			LCONSOLE_ERROR_MSG(0x105,
+					   "Error %d starting up LNI %s\n",
 					   rc, libcfs_lnd2str(lnd->lnd_type));
 			lnet_net_lock(LNET_LOCK_EX);
 			lnd->lnd_refcount--;
@@ -1141,7 +1144,8 @@ lnet_startup_lndnis(void)
 
 	if (the_lnet.ln_eq_waitni != NULL && nicount > 1) {
 		lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
-		LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network\n",
+		LCONSOLE_ERROR_MSG(0x109,
+				   "LND %s can only run single-network\n",
 				   libcfs_lnd2str(lnd_type));
 		goto failed;
 	}
@@ -1187,7 +1191,8 @@ LNetInit(void)
 	LASSERT(the_lnet.ln_cpt_number > 0);
 	if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
 		/* we are under risk of consuming all lh_cookie */
-		CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
+		CERROR(
+		       "Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
 		       the_lnet.ln_cpt_number, LNET_CPT_MAX);
 		return -1;
 	}
@@ -1749,7 +1754,8 @@ lnet_ping_target_fini(void)
 }
 
 int
-lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
+lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids,
+	  int n_ids)
 {
 	lnet_handle_eq_t eqh;
 	lnet_handle_md_t mdh;
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 70bc809..8359480 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 368bde9..0c01834 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -82,7 +82,9 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
 	count = cfs_power2_roundup(count);
 
 	if (callback != LNET_EQ_HANDLER_NONE && count != 0)
-		CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
+		CWARN(
+		      "EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n",
+		      count);
 
 	/* count can be 0 if only need callback, we can eliminate
 	 * overhead of enqueue event */
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 5856c30..04b1955 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -144,7 +144,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 
 		lmd->md_length = total_length;
 
-		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+		if ((umd->options & LNET_MD_MAX_SIZE) != 0 &&/* max size used */
 		    (umd->max_size < 0 ||
 		     umd->max_size > total_length)) /* illegal max_size */
 			return -EINVAL;
@@ -154,7 +154,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
 		lmd->md_iov.iov[0].iov_base = umd->start;
 		lmd->md_iov.iov[0].iov_len = umd->length;
 
-		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+		if ((umd->options & LNET_MD_MAX_SIZE) != 0 &&/* max size used */
 		    (umd->max_size < 0 ||
 		     umd->max_size > (int)umd->length)) /* illegal max_size */
 			return -EINVAL;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 84707c5..4735e9c 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -184,7 +184,8 @@ lnet_try_match_md(lnet_libmd_t *md,
 		mlength = info->mi_rlength;
 	} else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
 		/* this packet _really_ is too big */
-		CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
+		CERROR(
+		       "Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
 		       libcfs_id2str(info->mi_id), info->mi_mbits,
 		       info->mi_rlength, md->md_length - offset, mlength);
 
@@ -192,7 +193,8 @@ lnet_try_match_md(lnet_libmd_t *md,
 	}
 
 	/* Commit to this ME/MD */
-	CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
+	CDEBUG(D_NET,
+	       "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
 	       (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
 	       info->mi_portal, libcfs_id2str(info->mi_id), mlength,
 	       info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
@@ -685,7 +687,8 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
 		if ((rc & LNET_MATCHMD_OK) != 0) {
 			list_add_tail(&msg->msg_list, matches);
 
-			CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
+			CDEBUG(D_NET,
+			       "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
 			       libcfs_id2str(info.mi_id),
 			       info.mi_portal, info.mi_mbits,
 			       info.mi_roffset, info.mi_rlength);
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 96886a2..1f5629e 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -456,7 +456,8 @@ lnet_check_routes(void)
 
 				lnet_net_unlock(cpt);
 
-				CERROR("Routes to %s via %s and %s not supported\n",
+				CERROR(
+				       "Routes to %s via %s and %s not supported\n",
 				       libcfs_net2str(net),
 				       libcfs_nid2str(nid1),
 				       libcfs_nid2str(nid2));
@@ -1020,7 +1021,8 @@ lnet_router_checker_start(void)
 
 	if (check_routers_before_use &&
 	    dead_router_check_interval <= 0) {
-		LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
+		LCONSOLE_ERROR_MSG(0x10a,
+				   "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
 		return -EINVAL;
 	}
 
@@ -1321,7 +1323,8 @@ lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
 		rb = lnet_new_rtrbuf(rbp, cpt);
 
 		if (rb == NULL) {
-			CERROR("Failed to allocate %d router bufs of %d pages\n",
+			CERROR(
+			       "Failed to allocate %d router bufs of %d pages\n",
 			       nbufs, rbp->rbp_npages);
 			return -ENOMEM;
 		}
@@ -1451,7 +1454,8 @@ lnet_rtrpools_alloc(int im_a_router)
 	} else if (!strcmp(forwarding, "enabled")) {
 		/* explicitly enabled */
 	} else {
-		LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
+		LCONSOLE_ERROR_MSG(0x10b,
+				   "'forwarding' not set to either 'enabled' or 'disabled'\n");
 		return -EINVAL;
 	}
 
@@ -1511,7 +1515,7 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
 	unsigned long now = cfs_time_current();
 	int cpt = lnet_cpt_of_nid(nid);
 
-	LASSERT(!in_interrupt ());
+	LASSERT(!in_interrupt());
 
 	CDEBUG(D_NET, "%s notifying %s: %s\n",
 		(ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
@@ -1528,7 +1532,8 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
 
 	/* can't do predictions... */
 	if (cfs_time_after(when, now)) {
-		CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
+		CWARN(
+		      "Ignoring prediction from %s of %s %s %ld seconds in the future\n",
+		      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
 		      libcfs_nid2str(nid), alive ? "up" : "down",
 		      cfs_duration_sec(cfs_time_sub(when, now)));
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index ee902dc..35f1d14 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -49,7 +49,8 @@ enum {
  */
 #define LNET_PROC_CPT_BITS	(LNET_CPT_BITS + 1)
 /* change version, 16 bits or 8 bits */
-#define LNET_PROC_VER_BITS	max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, 64) / 4, 8)
+#define LNET_PROC_VER_BITS	max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, \
+						    64) / 4, 8)
 
 #define LNET_PROC_HASH_BITS	LNET_PEER_HASH_BITS
 /*
@@ -396,7 +397,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
 					      nrefs, nrtrrefs, alive_cnt,
 					      alive ? "up" : "down", last_ping,
 					      pingsent,
-					      cfs_duration_sec(cfs_time_sub(deadline, now)),
+					      cfs_duration_sec(
+						      cfs_time_sub(deadline,
+								   now)),
 					      down_ni, libcfs_nid2str(nid));
 			LASSERT(tmpstr + tmpsiz - s > 0);
 		}
@@ -791,17 +794,20 @@ static struct lnet_portal_rotors	portal_rotors[] = {
 	{
 		.pr_value = LNET_PTL_ROTOR_ON,
 		.pr_name  = "ON",
-		.pr_desc  = "round-robin dispatch all PUT messages for wildcard portals"
+		.pr_desc  =
+			"round-robin dispatch all PUT messages for wildcard portals"
 	},
 	{
 		.pr_value = LNET_PTL_ROTOR_RR_RT,
 		.pr_name  = "RR_RT",
-		.pr_desc  = "round-robin dispatch routed PUT message for wildcard portals"
+		.pr_desc  =
+			"round-robin dispatch routed PUT message for wildcard portals"
 	},
 	{
 		.pr_value = LNET_PTL_ROTOR_HASH_RT,
 		.pr_name  = "HASH_RT",
-		.pr_desc  = "dispatch routed PUT message by hashing source NID for wildcard portals"
+		.pr_desc  =
+			"dispatch routed PUT message by hashing source NID for wildcard portals"
 	},
 	{
 		.pr_value = -1,
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 11/13] staging: lnet: o2iblnd: checkpatch.pl fixes
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (18 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups Mike Shuey
                   ` (2 subsequent siblings)
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Clean up most checkpatch.pl warnings (primarily line length warnings).

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c    |   25 ++-
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h    |  204 +++++++++++---------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c |  184 +++++++++++-------
 .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c  |   55 ++++--
 4 files changed, 277 insertions(+), 191 deletions(-)

diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index a57c5c3..acfa465 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1166,10 +1166,11 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
 		rx->rx_conn = conn;
 		rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
 
-		rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
-						       rx->rx_msg,
-						       IBLND_MSG_SIZE,
-						       DMA_FROM_DEVICE);
+		rx->rx_msgaddr = kiblnd_dma_map_single(
+						conn->ibc_hdev->ibh_ibdev,
+						rx->rx_msg,
+						IBLND_MSG_SIZE,
+						DMA_FROM_DEVICE);
 		LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
 						   rx->rx_msgaddr));
 		KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
@@ -1387,7 +1388,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
 		.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
 		.page_shift        = PAGE_SHIFT,
 		.access            = (IB_ACCESS_LOCAL_WRITE |
-		                      IB_ACCESS_REMOTE_WRITE),
+				      IB_ACCESS_REMOTE_WRITE),
 		.pool_size         = fps->fps_pool_size,
 		.dirty_watermark   = fps->fps_flush_trigger,
 		.flush_function    = NULL,
@@ -1753,7 +1754,8 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
 	if (ps->ps_increasing) {
 		/* another thread is allocating a new pool */
 		spin_unlock(&ps->ps_lock);
-		CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
+		CDEBUG(D_NET,
+		       "Another thread is allocating new %s pool, waiting for her to complete\n",
 		       ps->ps_name);
 		schedule();
 		goto again;
@@ -2354,7 +2356,8 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 	if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
 		/* it's 4T..., assume we will re-code at that time */
-		CERROR("Can't support memory size: x%#llx with MR size: x%#llx\n",
+		CERROR(
+		       "Can't support memory size: x%#llx with MR size: x%#llx\n",
 		       mm_size, mr_size);
 		return -EINVAL;
 	}
@@ -2376,7 +2379,8 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 		mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
 		if (IS_ERR(mr)) {
-			CERROR("Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
+			CERROR(
+			       "Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
 			       ipb.addr, ipb.size, PTR_ERR(mr));
 			kiblnd_hdev_cleanup_mrs(hdev);
 			return PTR_ERR(mr);
@@ -2389,7 +2393,8 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 
 out:
 	if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
-		LCONSOLE_INFO("Register global MR array, MR size: %#llx, array size: %d\n",
+		LCONSOLE_INFO(
+			      "Register global MR array, MR size: %#llx, array size: %d\n",
 			      hdev->ibh_mr_size, hdev->ibh_nmrs);
 	return 0;
 }
@@ -2758,7 +2763,7 @@ void kiblnd_shutdown(lnet_ni_t *ni)
 		i = 2;
 		while (atomic_read(&net->ibn_npeers) != 0) {
 			i++;
-			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
+			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,/* 2**n? */
 			       "%s: waiting for %d peers to disconnect\n",
 			       libcfs_nid2str(ni->ni_nid),
 			       atomic_read(&net->ibn_npeers));
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 7f52c69..976665b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -48,7 +48,7 @@
 #include <linux/uio.h>
 #include <linux/uaccess.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/fs.h>
 #include <linux/file.h>
@@ -118,17 +118,23 @@ typedef struct {
 extern kib_tunables_t  kiblnd_tunables;
 
 #define IBLND_MSG_QUEUE_SIZE_V1   8 /* V1 only : # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
+#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return
+				     * credits */
 
 #define IBLND_CREDITS_DEFAULT     8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
 
-#define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
-				     IBLND_MSG_QUEUE_SIZE_V1 :   \
-				     *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
+/* Max # of peer credits */
+#define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1)
+
+/* # messages/RDMAs in-flight */
+#define IBLND_MSG_QUEUE_SIZE(v)   ((v) == IBLND_MSG_VERSION_1 ? \
+				   IBLND_MSG_QUEUE_SIZE_V1 :    \
+				   *kiblnd_tunables.kib_peertxcredits)
+
+/* when eagerly to return credits */
 #define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
-				     IBLND_CREDIT_HIGHWATER_V1 : \
-				     *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
+				    IBLND_CREDIT_HIGHWATER_V1 :  \
+				    *kiblnd_tunables.kib_peercredits_hiw)
 
 #define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
 
@@ -144,20 +150,23 @@ kiblnd_concurrent_sends_v1(void)
 	return *kiblnd_tunables.kib_concurrent_sends;
 }
 
-#define IBLND_CONCURRENT_SENDS(v)  ((v) == IBLND_MSG_VERSION_1 ? \
-				     kiblnd_concurrent_sends_v1() : \
-				     *kiblnd_tunables.kib_concurrent_sends)
+#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ?           \
+				   kiblnd_concurrent_sends_v1() :         \
+				   *kiblnd_tunables.kib_concurrent_sends)
 /* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
 #define IBLND_OOB_CAPABLE(v)       ((v) != IBLND_MSG_VERSION_1)
 #define IBLND_OOB_MSGS(v)	   (IBLND_OOB_CAPABLE(v) ? 2 : 0)
 
-#define IBLND_MSG_SIZE	      (4<<10)		 /* max size of queued messages (inc hdr) */
-#define IBLND_MAX_RDMA_FRAGS	 LNET_MAX_IOV	   /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand != 0 ? \
-				    *kiblnd_tunables.kib_map_on_demand :      \
-				     IBLND_MAX_RDMA_FRAGS)  /* max # of fragments configured by user */
+#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
+#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
+
+/* max # of fragments configured by user */
+#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+			      *kiblnd_tunables.kib_map_on_demand :      \
+			      IBLND_MAX_RDMA_FRAGS)
 #define IBLND_RDMA_FRAGS(v)	((v) == IBLND_MSG_VERSION_1 ? \
-				     IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
+				     IBLND_MAX_RDMA_FRAGS :   \
+				     IBLND_CFG_RDMA_FRAGS)
 
 /************************/
 /* derived constants... */
@@ -172,14 +181,16 @@ kiblnd_concurrent_sends_v1(void)
 #define IBLND_TX_MSGS()	    (*kiblnd_tunables.kib_ntx)
 
 /* RX messages (per connection) */
-#define IBLND_RX_MSGS(v)	    (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
-#define IBLND_RX_MSG_BYTES(v)       (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES(v)      ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
+#define IBLND_RX_MSGS(v)      (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
+#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / \
+			       PAGE_SIZE)
 
 /* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS(v)	    IBLND_RX_MSGS(v)
-#define IBLND_SEND_WRS(v)	  ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
-#define IBLND_CQ_ENTRIES(v)	 (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
+#define IBLND_RECV_WRS(v)   IBLND_RX_MSGS(v)
+#define IBLND_SEND_WRS(v)   ((IBLND_RDMA_FRAGS(v) + 1) *            \
+			     IBLND_CONCURRENT_SENDS(v))
+#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
 
 struct kib_hca_dev;
 
@@ -520,7 +531,9 @@ typedef struct kib_rx                         /* receive message */
 	enum ib_wc_status      rx_status;     /* completion status */
 	kib_msg_t              *rx_msg;       /* message buffer (host vaddr) */
 	__u64                  rx_msgaddr;    /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+
+	DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);  /* for dma_unmap_single() */
+
 	struct ib_recv_wr      rx_wrq;        /* receive work item... */
 	struct ib_sge          rx_sge;        /* ...and its memory */
 } kib_rx_t;
@@ -547,7 +560,9 @@ typedef struct kib_tx                         /* transmit message */
 					       * completion */
 	kib_msg_t              *tx_msg;       /* message buffer (host vaddr) */
 	__u64                  tx_msgaddr;    /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+
+	DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);  /* for dma_unmap_single() */
+
 	int                    tx_nwrq;       /* # send work items */
 	struct ib_send_wr      *tx_wrq;       /* send work items... */
 	struct ib_sge          *tx_sge;       /* ...and their memory */
@@ -648,14 +663,14 @@ extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
 static inline void
 kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
 {
-	LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
 	atomic_inc(&hdev->ibh_ref);
 }
 
 static inline void
 kiblnd_hdev_decref(kib_hca_dev_t *hdev)
 {
-	LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
 	if (atomic_dec_and_test(&hdev->ibh_ref))
 		kiblnd_hdev_destroy(hdev);
 }
@@ -698,44 +713,44 @@ do {									\
 	}								\
 } while (0)
 
-#define kiblnd_peer_addref(peer)				\
-do {							    \
-	CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",		\
-	       (peer), libcfs_nid2str((peer)->ibp_nid),	 \
-	       atomic_read (&(peer)->ibp_refcount));	\
-	atomic_inc(&(peer)->ibp_refcount);		  \
+#define kiblnd_peer_addref(peer)                                        \
+do {                                                                    \
+	CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                        \
+	       (peer), libcfs_nid2str((peer)->ibp_nid),                 \
+	       atomic_read(&(peer)->ibp_refcount));                     \
+	atomic_inc(&(peer)->ibp_refcount);                              \
 } while (0)
 
-#define kiblnd_peer_decref(peer)				\
-do {							    \
-	CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",		\
-	       (peer), libcfs_nid2str((peer)->ibp_nid),	 \
-	       atomic_read (&(peer)->ibp_refcount));	\
-	LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);	      \
-	if (atomic_dec_and_test(&(peer)->ibp_refcount))     \
-		kiblnd_destroy_peer(peer);		      \
+#define kiblnd_peer_decref(peer)                                        \
+do {                                                                    \
+	CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                        \
+	       (peer), libcfs_nid2str((peer)->ibp_nid),                 \
+	       atomic_read(&(peer)->ibp_refcount));                     \
+	LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);                      \
+	if (atomic_dec_and_test(&(peer)->ibp_refcount))                 \
+		kiblnd_destroy_peer(peer);                              \
 } while (0)
 
 static inline struct list_head *
-kiblnd_nid2peerlist (lnet_nid_t nid)
+kiblnd_nid2peerlist(lnet_nid_t nid)
 {
 	unsigned int hash =
 		((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
 
-	return (&kiblnd_data.kib_peers [hash]);
+	return &kiblnd_data.kib_peers[hash];
 }
 
 static inline int
-kiblnd_peer_active (kib_peer_t *peer)
+kiblnd_peer_active(kib_peer_t *peer)
 {
 	/* Am I in the peer hash table? */
 	return (!list_empty(&peer->ibp_list));
 }
 
 static inline kib_conn_t *
-kiblnd_get_conn_locked (kib_peer_t *peer)
+kiblnd_get_conn_locked(kib_peer_t *peer)
 {
-	LASSERT (!list_empty(&peer->ibp_conns));
+	LASSERT(!list_empty(&peer->ibp_conns));
 
 	/* just return the first connection */
 	return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
@@ -752,7 +767,7 @@ kiblnd_send_keepalive(kib_conn_t *conn)
 static inline int
 kiblnd_need_noop(kib_conn_t *conn)
 {
-	LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
 	if (conn->ibc_outstanding_credits <
 	    IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
@@ -789,7 +804,7 @@ kiblnd_abort_receives(kib_conn_t *conn)
 }
 
 static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
 {
 	if (q == &conn->ibc_tx_queue)
 		return "tx_queue";
@@ -816,43 +831,43 @@ kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
 #define IBLND_WID_MASK  3UL
 
 static inline __u64
-kiblnd_ptr2wreqid (void *ptr, int type)
+kiblnd_ptr2wreqid(void *ptr, int type)
 {
 	unsigned long lptr = (unsigned long)ptr;
 
-	LASSERT ((lptr & IBLND_WID_MASK) == 0);
-	LASSERT ((type & ~IBLND_WID_MASK) == 0);
+	LASSERT((lptr & IBLND_WID_MASK) == 0);
+	LASSERT((type & ~IBLND_WID_MASK) == 0);
 	return (__u64)(lptr | type);
 }
 
 static inline void *
-kiblnd_wreqid2ptr (__u64 wreqid)
+kiblnd_wreqid2ptr(__u64 wreqid)
 {
 	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
 }
 
 static inline int
-kiblnd_wreqid2type (__u64 wreqid)
+kiblnd_wreqid2type(__u64 wreqid)
 {
 	return (wreqid & IBLND_WID_MASK);
 }
 
 static inline void
-kiblnd_set_conn_state (kib_conn_t *conn, int state)
+kiblnd_set_conn_state(kib_conn_t *conn, int state)
 {
 	conn->ibc_state = state;
 	mb();
 }
 
 static inline void
-kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
+kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
 {
 	msg->ibm_type = type;
 	msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
 }
 
 static inline int
-kiblnd_rd_size (kib_rdma_desc_t *rd)
+kiblnd_rd_size(kib_rdma_desc_t *rd)
 {
 	int   i;
 	int   size;
@@ -888,7 +903,7 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
 		rd->rd_frags[index].rf_addr += nob;
 		rd->rd_frags[index].rf_nob  -= nob;
 	} else {
-		index ++;
+		index++;
 	}
 
 	return index;
@@ -897,8 +912,8 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
 static inline int
 kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
 {
-	LASSERT (msgtype == IBLND_MSG_GET_REQ ||
-		 msgtype == IBLND_MSG_PUT_ACK);
+	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
+		msgtype == IBLND_MSG_PUT_ACK);
 
 	return msgtype == IBLND_MSG_GET_REQ ?
 	       offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
@@ -980,67 +995,68 @@ int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
 
 int  kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
-			 kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
+			 kib_rdma_desc_t *rd, __u64 *iova,
+			 kib_phys_mr_t **pp_pmr);
 void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
 
-int  kiblnd_startup (lnet_ni_t *ni);
-void kiblnd_shutdown (lnet_ni_t *ni);
-int  kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+int  kiblnd_startup(lnet_ni_t *ni);
+void kiblnd_shutdown(lnet_ni_t *ni);
+int  kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
+void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
 
 int  kiblnd_tunables_init(void);
 void kiblnd_tunables_fini(void);
 
-int  kiblnd_connd (void *arg);
+int  kiblnd_connd(void *arg);
 int  kiblnd_scheduler(void *arg);
 int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
-int  kiblnd_failover_thread (void *arg);
+int  kiblnd_failover_thread(void *arg);
 
 int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages (kib_pages_t *p);
+void kiblnd_free_pages(kib_pages_t *p);
 
 int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
 			struct rdma_cm_event *event);
 int  kiblnd_translate_mtu(int value);
 
 int  kiblnd_dev_failover(kib_dev_t *dev);
-int  kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer (kib_peer_t *peer);
-void kiblnd_destroy_dev (kib_dev_t *dev);
-void kiblnd_unlink_peer_locked (kib_peer_t *peer);
-void kiblnd_peer_alive (kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
-void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
-int  kiblnd_close_stale_conns_locked (kib_peer_t *peer,
-				      int version, __u64 incarnation);
-int  kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
+int  kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer(kib_peer_t *peer);
+void kiblnd_destroy_dev(kib_dev_t *dev);
+void kiblnd_unlink_peer_locked(kib_peer_t *peer);
+void kiblnd_peer_alive(kib_peer_t *peer);
+kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
+void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
+int  kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+				     int version, __u64 incarnation);
+int  kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
 
 void kiblnd_connreq_done(kib_conn_t *conn, int status);
-kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
-				int state, int version);
-void kiblnd_destroy_conn (kib_conn_t *conn);
-void kiblnd_close_conn (kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
-
-int  kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
-		       int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-
-void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
+kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+			       int state, int version);
+void kiblnd_destroy_conn(kib_conn_t *conn);
+void kiblnd_close_conn(kib_conn_t *conn, int error);
+void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
+
+int  kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
+		      int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+
+void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
+void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
 			 int status);
-void kiblnd_check_sends (kib_conn_t *conn);
+void kiblnd_check_sends(kib_conn_t *conn);
 
 void kiblnd_qp_event(struct ib_event *event, void *arg);
 void kiblnd_cq_event(struct ib_event *event, void *arg);
 void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
 
-void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
 		      int credits, lnet_nid_t dstnid, __u64 dststamp);
 int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int  kiblnd_post_rx (kib_rx_t *rx, int credit);
+int  kiblnd_post_rx(kib_rx_t *rx, int credit);
 
 int  kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
 int  kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 477aa8b..250c5fa 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -50,9 +50,9 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
 
 	LASSERT(net != NULL);
 	LASSERT(!in_interrupt());
-	LASSERT(!tx->tx_queued);	       /* mustn't be queued for sending */
-	LASSERT(tx->tx_sending == 0);	  /* mustn't be awaiting sent callback */
-	LASSERT(!tx->tx_waiting);	      /* mustn't be awaiting peer response */
+	LASSERT(!tx->tx_queued);      /* mustn't be queued for sending */
+	LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */
+	LASSERT(!tx->tx_waiting);     /* mustn't be awaiting peer response */
 	LASSERT(tx->tx_pool != NULL);
 
 	kiblnd_unmap_tx(ni, tx);
@@ -410,7 +410,8 @@ kiblnd_handle_rx(kib_rx_t *rx)
 		tx->tx_nwrq = 0;		/* overwrite PUT_REQ */
 
 		rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
-				       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
+				       kiblnd_rd_size(
+					       &msg->ibm_u.putack.ibpam_rd),
 				       &msg->ibm_u.putack.ibpam_rd,
 				       msg->ibm_u.putack.ibpam_dst_cookie);
 		if (rc2 < 0)
@@ -1003,7 +1004,8 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
 
 	if (failed) {
 		if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
-			CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
+			CNETERR(
+				"Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n",
 				libcfs_nid2str(conn->ibc_peer->ibp_nid),
 				tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
 				status);
@@ -1114,7 +1116,8 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		}
 
 		if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
-			CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
+			CERROR(
+			       "RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
 			       libcfs_nid2str(conn->ibc_peer->ibp_nid),
 			       IBLND_RDMA_FRAGS(conn->ibc_version),
 			       srcidx, srcrd->rd_nfrags,
@@ -1154,7 +1157,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		sge++;
 	}
 
-	if (rc < 0)			     /* no RDMA if completing with failure */
+	if (rc < 0) /* no RDMA if completing with failure */
 		tx->tx_nwrq = 0;
 
 	ibmsg->ibm_u.completion.ibcm_status = rc;
@@ -1170,8 +1173,8 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
 {
 	struct list_head *q;
 
-	LASSERT(tx->tx_nwrq > 0);	      /* work items set up */
-	LASSERT(!tx->tx_queued);	       /* not queued for sending already */
+	LASSERT(tx->tx_nwrq > 0); /* work items set up */
+	LASSERT(!tx->tx_queued);  /* not queued for sending already */
 	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
 	tx->tx_queued = 1;
@@ -1343,8 +1346,10 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 	/* If I get here, I've committed to send, so I complete the tx with
 	 * failure on any problems */
 
-	LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
-	LASSERT(tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */
+	LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned
+						     * a conn */
+	LASSERT(tx == NULL || tx->tx_nwrq > 0);     /* work items have been set
+						     * up */
 
 	/* First time, just use a read lock since I expect to find my peer
 	 * connected */
@@ -1496,7 +1501,9 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 			break;		  /* send IMMEDIATE */
 
 		/* is the REPLY message too small for RDMA? */
-		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+		nob = offsetof(kib_msg_t,
+			       ibm_u.immediate.ibim_payload[
+				       lntmsg->msg_md->md_length]);
 		if (nob <= IBLND_MSG_SIZE)
 			break;		  /* send IMMEDIATE */
 
@@ -1542,15 +1549,17 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 			return -EIO;
 		}
 
-		tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
-		tx->tx_waiting = 1;	     /* waiting for GET_DONE */
+		tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on
+					    * completion */
+		tx->tx_waiting = 1; /* waiting for GET_DONE */
 		kiblnd_launch_tx(ni, tx, target.nid);
 		return 0;
 
 	case LNET_MSG_REPLY:
 	case LNET_MSG_PUT:
 		/* Is the payload small enough not to need RDMA? */
-		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+		nob = offsetof(kib_msg_t,
+			       ibm_u.immediate.ibim_payload[payload_nob]);
 		if (nob <= IBLND_MSG_SIZE)
 			break;		  /* send IMMEDIATE */
 
@@ -1580,10 +1589,11 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 		ibmsg = tx->tx_msg;
 		ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
 		ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
-		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
+				   sizeof(kib_putreq_msg_t));
 
-		tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
-		tx->tx_waiting = 1;	     /* waiting for PUT_{ACK,NAK} */
+		tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
+		tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
 		kiblnd_launch_tx(ni, tx, target.nid);
 		return 0;
 	}
@@ -1605,19 +1615,21 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 
 	if (payload_kiov != NULL)
 		lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
-				    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+				    offsetof(kib_msg_t,
+					     ibm_u.immediate.ibim_payload),
 				    payload_niov, payload_kiov,
 				    payload_offset, payload_nob);
 	else
 		lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
-				   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+				   offsetof(kib_msg_t,
+					    ibm_u.immediate.ibim_payload),
 				   payload_niov, payload_iov,
 				   payload_offset, payload_nob);
 
 	nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
 	kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
 
-	tx->tx_lntmsg[0] = lntmsg;	      /* finalise lntmsg on completion */
+	tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
 	kiblnd_launch_tx(ni, tx, target.nid);
 	return 0;
 }
@@ -1711,7 +1723,8 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 		nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
 		if (nob > rx->rx_nob) {
 			CERROR("Immediate message from %s too big: %d(%d)\n",
-				libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
+				libcfs_nid2str(
+				      rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
 				nob, rx->rx_nob);
 			rc = -EPROTO;
 			break;
@@ -1719,22 +1732,25 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 
 		if (kiov != NULL)
 			lnet_copy_flat2kiov(niov, kiov, offset,
-					    IBLND_MSG_SIZE, rxmsg,
-					    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
-					    mlen);
+				    IBLND_MSG_SIZE, rxmsg,
+				    offsetof(kib_msg_t,
+					     ibm_u.immediate.ibim_payload),
+				    mlen);
 		else
 			lnet_copy_flat2iov(niov, iov, offset,
-					   IBLND_MSG_SIZE, rxmsg,
-					   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
-					   mlen);
+				   IBLND_MSG_SIZE, rxmsg,
+				   offsetof(kib_msg_t,
+					    ibm_u.immediate.ibim_payload),
+				   mlen);
 		lnet_finalize(ni, lntmsg, 0);
 		break;
 
 	case IBLND_MSG_PUT_REQ:
 		if (mlen == 0) {
 			lnet_finalize(ni, lntmsg, 0);
-			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
-					       rxmsg->ibm_u.putreq.ibprm_cookie);
+			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+					0,
+					rxmsg->ibm_u.putreq.ibprm_cookie);
 			break;
 		}
 
@@ -1761,19 +1777,22 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 			       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
 			kiblnd_tx_done(ni, tx);
 			/* tell peer it's over */
-			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
-					       rxmsg->ibm_u.putreq.ibprm_cookie);
+			kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+					rc,
+					rxmsg->ibm_u.putreq.ibprm_cookie);
 			break;
 		}
 
-		nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
-		txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+		nob = offsetof(kib_putack_msg_t,
+			       ibpam_rd.rd_frags[tx->tx_nfrags]);
+		txmsg->ibm_u.putack.ibpam_src_cookie =
+				rxmsg->ibm_u.putreq.ibprm_cookie;
 		txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
 
 		kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
 
-		tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
-		tx->tx_waiting = 1;	     /* waiting for PUT_DONE */
+		tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
+		tx->tx_waiting = 1; /* waiting for PUT_DONE */
 		kiblnd_queue_tx(tx, conn);
 
 		/* reposted buffer reserved for PUT_DONE */
@@ -1883,8 +1902,10 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
 		       libcfs_nid2str(peer->ibp_nid), error,
 		       list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
 		       list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
-		       list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
-		       list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+		       list_empty(&conn->ibc_tx_queue_rsrvd) ? "" :
+			       "(sending_rsrvd)",
+		       list_empty(&conn->ibc_tx_queue_nocred) ? "" :
+			       "(sending_nocred)",
 		       list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
 	}
 
@@ -2191,6 +2212,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	unsigned long flags;
 	int rc;
 	struct sockaddr_in *peer_addr;
+
 	LASSERT(!in_interrupt());
 
 	/* cmid inherits 'context' from the corresponding listener id */
@@ -2206,6 +2228,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	if (*kiblnd_tunables.kib_require_priv_port &&
 	    ntohs(peer_addr->sin_port) >= PROT_SOCK) {
 		__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+
 		CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
 		       &ip, ntohs(peer_addr->sin_port));
 		goto failed;
@@ -2280,8 +2303,10 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
 	if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
 	    IBLND_MSG_QUEUE_SIZE(version)) {
-		CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
-		       libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
+		CERROR(
+		       "Can't accept %s: incompatible queue depth %d (%d wanted)\n",
+		       libcfs_nid2str(nid),
+		       reqmsg->ibm_u.connparams.ibcp_queue_depth,
 		       IBLND_MSG_QUEUE_SIZE(version));
 
 		if (version == IBLND_MSG_VERSION)
@@ -2292,7 +2317,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
 	if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
 	    IBLND_RDMA_FRAGS(version)) {
-		CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
+		CERROR(
+		       "Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
 		       libcfs_nid2str(nid), version,
 		       reqmsg->ibm_u.connparams.ibcp_max_frags,
 		       IBLND_RDMA_FRAGS(version));
@@ -2394,17 +2420,19 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 	conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
 	conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
-	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
-		 <= IBLND_RX_MSGS(version));
+	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+		IBLND_OOB_MSGS(version)
+		<= IBLND_RX_MSGS(version));
 
 	ackmsg = &conn->ibc_connvars->cv_msg;
 	memset(ackmsg, 0, sizeof(*ackmsg));
 
 	kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
 			sizeof(ackmsg->ibm_u.connparams));
-	ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
+	ackmsg->ibm_u.connparams.ibcp_queue_depth  =
+						IBLND_MSG_QUEUE_SIZE(version);
 	ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-	ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
+	ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
 
 	kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
 
@@ -2498,10 +2526,12 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
 		break;
 	}
 
-	CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
+	CNETERR(
+		"%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
 		libcfs_nid2str(peer->ibp_nid),
 		reason, IBLND_MSG_VERSION, version,
-		cp != NULL ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
+		cp != NULL ? cp->ibcp_queue_depth  :
+			     IBLND_MSG_QUEUE_SIZE(version),
 		cp != NULL ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
 		cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
 
@@ -2573,14 +2603,16 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 
 			if (rej->ibr_magic != IBLND_MSG_MAGIC &&
 			    rej->ibr_magic != LNET_PROTO_MAGIC) {
-				CERROR("%s rejected: consumer defined fatal error\n",
+				CERROR(
+				       "%s rejected: consumer defined fatal error\n",
 				       libcfs_nid2str(peer->ibp_nid));
 				break;
 			}
 
 			if (rej->ibr_version != IBLND_MSG_VERSION &&
 			    rej->ibr_version != IBLND_MSG_VERSION_1) {
-				CERROR("%s rejected: o2iblnd version %x error\n",
+				CERROR(
+				       "%s rejected: o2iblnd version %x error\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       rej->ibr_version);
 				break;
@@ -2588,11 +2620,14 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 
 			if (rej->ibr_why     == IBLND_REJECT_FATAL &&
 			    rej->ibr_version == IBLND_MSG_VERSION_1) {
-				CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
-				       libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
+				CDEBUG(D_NET,
+				       "rejected by old version peer %s: %x\n",
+				       libcfs_nid2str(peer->ibp_nid),
+				       rej->ibr_version);
 
 				if (conn->ibc_version != IBLND_MSG_VERSION_1)
-					rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
+					rej->ibr_why =
+						IBLND_REJECT_CONN_UNCOMPAT;
 			}
 
 			switch (rej->ibr_why) {
@@ -2604,7 +2639,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 				break;
 
 			case IBLND_REJECT_MSG_QUEUE_SIZE:
-				CERROR("%s rejected: incompatible message queue depth %d, %d\n",
+				CERROR(
+				       "%s rejected: incompatible message queue depth %d, %d\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       cp != NULL ? cp->ibcp_queue_depth :
 				       IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
@@ -2612,7 +2648,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 				break;
 
 			case IBLND_REJECT_RDMA_FRAGS:
-				CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
+				CERROR(
+				       "%s rejected: incompatible # of RDMA fragments %d, %d\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       cp != NULL ? cp->ibcp_max_frags :
 				       IBLND_RDMA_FRAGS(rej->ibr_version),
@@ -2674,7 +2711,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 	}
 
 	if (ver != msg->ibm_version) {
-		CERROR("%s replied version %x is different with requested version %x\n",
+		CERROR(
+		       "%s replied version %x is different with requested version %x\n",
 		       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
 		rc = -EPROTO;
 		goto failed;
@@ -2718,7 +2756,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
 	if (rc != 0) {
-		CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
+		CERROR(
+		       "Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
 		       libcfs_nid2str(peer->ibp_nid), rc,
 		       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
 		goto failed;
@@ -2727,7 +2766,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 	conn->ibc_incarnation = msg->ibm_srcstamp;
 	conn->ibc_credits =
 	conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
-	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
+	LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+		IBLND_OOB_MSGS(ver)
 		 <= IBLND_RX_MSGS(ver));
 
 	kiblnd_connreq_done(conn, 0);
@@ -2764,7 +2804,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 
 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
+	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
+				  version);
 	if (conn == NULL) {
 		kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
 		kiblnd_peer_decref(peer); /* lose cmid's ref */
@@ -3054,7 +3095,8 @@ kiblnd_check_conns(int idx)
 			}
 
 			if (timedout) {
-				CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
+				CERROR(
+				       "Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
 				       libcfs_nid2str(peer->ibp_nid),
 				       cfs_duration_sec(cfs_time_current() -
 							peer->ibp_last_alive),
@@ -3133,8 +3175,7 @@ kiblnd_connd(void *arg)
 		dropped_lock = 0;
 
 		if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
-			conn = list_entry(kiblnd_data. \
-					      kib_connd_zombies.next,
+			conn = list_entry(kiblnd_data.kib_connd_zombies.next,
 					      kib_conn_t, ibc_list);
 			list_del(&conn->ibc_list);
 
@@ -3169,7 +3210,8 @@ kiblnd_connd(void *arg)
 			const int p = 1;
 			int chunk = kiblnd_data.kib_peer_hash_size;
 
-			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+			spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+					       flags);
 			dropped_lock = 1;
 
 			/* Time to check for RDMA timeouts on a few more
@@ -3325,7 +3367,8 @@ kiblnd_scheduler(void *arg)
 
 	rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
 	if (rc != 0) {
-		CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
+		CWARN(
+		      "Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
 		      sched->ibs_cpt);
 	}
 
@@ -3358,8 +3401,11 @@ kiblnd_scheduler(void *arg)
 				rc = ib_req_notify_cq(conn->ibc_cq,
 						      IB_CQ_NEXT_COMP);
 				if (rc < 0) {
-					CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
-					      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+					CWARN(
+					      "%s: ib_req_notify_cq failed: %d, closing connection\n",
+					      libcfs_nid2str(
+						      conn->ibc_peer->ibp_nid),
+					      rc);
 					kiblnd_close_conn(conn, -EIO);
 					kiblnd_conn_decref(conn);
 					spin_lock_irqsave(&sched->ibs_lock,
@@ -3371,7 +3417,8 @@ kiblnd_scheduler(void *arg)
 			}
 
 			if (rc < 0) {
-				CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
+				CWARN(
+				      "%s: ib_poll_cq failed: %d, closing connection\n",
 				      libcfs_nid2str(conn->ibc_peer->ibp_nid),
 				      rc);
 				kiblnd_close_conn(conn, -EIO);
@@ -3403,7 +3450,7 @@ kiblnd_scheduler(void *arg)
 				spin_lock_irqsave(&sched->ibs_lock, flags);
 			}
 
-			kiblnd_conn_decref(conn); /* ...drop my ref from above */
+			kiblnd_conn_decref(conn);/* ...drop my ref from above */
 			did_something = 1;
 		}
 
@@ -3474,7 +3521,8 @@ kiblnd_failover_thread(void *arg)
 
 			/* failed to failover, retry later */
 			dev->ibd_next_failover =
-				cfs_time_shift(min(dev->ibd_failed_failover, 10));
+				cfs_time_shift(min(dev->ibd_failed_failover,
+						   10));
 			if (kiblnd_dev_can_failover(dev)) {
 				list_add_tail(&dev->ibd_fail_list,
 					      &kiblnd_data.kib_failed_devs);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b0e0036..90f32ca 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -192,38 +192,55 @@ kiblnd_tunables_init(void)
 		*kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
 
 	if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
-		*kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
+		*kiblnd_tunables.kib_peertxcredits =
+				*kiblnd_tunables.kib_credits;
 
-	if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
-		*kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
+	if (*kiblnd_tunables.kib_peercredits_hiw <
+	    *kiblnd_tunables.kib_peertxcredits / 2)
+		*kiblnd_tunables.kib_peercredits_hiw =
+				*kiblnd_tunables.kib_peertxcredits / 2;
 
-	if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
-		*kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
+	if (*kiblnd_tunables.kib_peercredits_hiw >=
+	    *kiblnd_tunables.kib_peertxcredits)
+		*kiblnd_tunables.kib_peercredits_hiw =
+				*kiblnd_tunables.kib_peertxcredits - 1;
 
 	if (*kiblnd_tunables.kib_map_on_demand < 0 ||
 	    *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
-		*kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+		/* disable map-on-demand */
+		*kiblnd_tunables.kib_map_on_demand = 0;
 
 	if (*kiblnd_tunables.kib_map_on_demand == 1)
-		*kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
+		/* don't make sense to create map if only one fragment */
+		*kiblnd_tunables.kib_map_on_demand = 2;
 
 	if (*kiblnd_tunables.kib_concurrent_sends == 0) {
 		if (*kiblnd_tunables.kib_map_on_demand > 0 &&
-		    *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
-			*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
+		    *kiblnd_tunables.kib_map_on_demand <=
+		    IBLND_MAX_RDMA_FRAGS / 8)
+			*kiblnd_tunables.kib_concurrent_sends =
+				(*kiblnd_tunables.kib_peertxcredits) * 2;
 		else
-			*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
+			*kiblnd_tunables.kib_concurrent_sends =
+				(*kiblnd_tunables.kib_peertxcredits);
 	}
 
-	if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
-		*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
-
-	if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
-		*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
-
-	if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
-		CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
-		      *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
+	if (*kiblnd_tunables.kib_concurrent_sends >
+	    *kiblnd_tunables.kib_peertxcredits * 2)
+		*kiblnd_tunables.kib_concurrent_sends =
+				*kiblnd_tunables.kib_peertxcredits * 2;
+
+	if (*kiblnd_tunables.kib_concurrent_sends <
+	    *kiblnd_tunables.kib_peertxcredits / 2)
+		*kiblnd_tunables.kib_concurrent_sends =
+				*kiblnd_tunables.kib_peertxcredits / 2;
+
+	if (*kiblnd_tunables.kib_concurrent_sends <
+	    *kiblnd_tunables.kib_peertxcredits) {
+		CWARN(
+		      "Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
+		      *kiblnd_tunables.kib_concurrent_sends,
+		      *kiblnd_tunables.kib_peertxcredits);
 	}
 
 	return 0;
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (19 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 11/13] staging: lnet: o2iblnd: " Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-21 19:50 ` [PATCH 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes Mike Shuey
  2015-05-22  9:21 ` [PATCH v4 00/13] staging: lustre: lnet: code cleanups Dan Carpenter
  22 siblings, 0 replies; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

More checkpatch.pl warnings & code cleanup.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 .../staging/lustre/lnet/klnds/socklnd/socklnd.c    |   94 +++--
 .../staging/lustre/lnet/klnds/socklnd/socklnd.h    |   20 +-
 .../staging/lustre/lnet/klnds/socklnd/socklnd_cb.c |  419 +++++++++++---------
 .../lustre/lnet/klnds/socklnd/socklnd_lib-linux.c  |   24 +-
 .../lustre/lnet/klnds/socklnd/socklnd_lib-linux.h  |    2 +-
 .../lustre/lnet/klnds/socklnd/socklnd_proto.c      |   48 ++-
 6 files changed, 351 insertions(+), 256 deletions(-)

diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 7b5d407..286ef3f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -337,7 +337,8 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
 			       &route->ksnr_ipaddr,
 			       &conn->ksnc_myipaddr);
 		} else {
-			CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
+			CDEBUG(D_NET,
+			       "Rebinding %s %pI4h from %pI4h to %pI4h\n",
 			       libcfs_id2str(peer->ksnp_id),
 			       &route->ksnr_ipaddr,
 			       &route->ksnr_myipaddr,
@@ -576,7 +577,8 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
 	if (id.nid != LNET_NID_ANY)
-		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
+				ksocknal_data.ksnd_peers);
 	else {
 		lo = 0;
 		hi = ksocknal_data.ksnd_peer_hash_size - 1;
@@ -590,8 +592,10 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 			if (peer->ksnp_ni != ni)
 				continue;
 
-			if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
-			      (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
+			if (!((id.nid == LNET_NID_ANY ||
+			       peer->ksnp_id.nid == id.nid) &&
+			      (id.pid == LNET_PID_ANY ||
+			       peer->ksnp_id.pid == id.pid)))
 				continue;
 
 			ksocknal_peer_addref(peer);     /* a ref for me... */
@@ -805,12 +809,15 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 					if (peer->ksnp_passive_ips[k] == ip)
 						break;
 
-				if (k < peer->ksnp_n_passive_ips) /* using it already */
+				/* using it already */
+				if (k < peer->ksnp_n_passive_ips)
 					continue;
 
-				k = ksocknal_match_peerip(iface, peerips, n_peerips);
+				k = ksocknal_match_peerip(iface, peerips,
+							  n_peerips);
 				xor = ip ^ peerips[k];
-				this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
+				this_netmatch = ((xor & iface->ksni_netmask) ==
+						 0) ? 1 : 0;
 
 				if (!(best_iface == NULL ||
 				      best_netmatch < this_netmatch ||
@@ -973,7 +980,8 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
 
 	LIBCFS_ALLOC(cr, sizeof(*cr));
 	if (cr == NULL) {
-		LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
+		LCONSOLE_ERROR_MSG(0x12f,
+				   "Dropping connection request from %pI4h: memory exhausted\n",
 				   &peer_ip);
 		return -ENOMEM;
 	}
@@ -1257,7 +1265,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
 	conn->ksnc_tx_last_post = cfs_time_current();
 	/* Set the deadline for the outgoing HELLO to drain */
 	conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
-	conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+	conn->ksnc_tx_deadline =
+		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 	mb();   /* order with adding to peer's conn list */
 
 	list_add(&conn->ksnc_list, &peer->ksnp_conns);
@@ -1269,7 +1278,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
 
 	/* Take packets blocking for this connection. */
 	list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
-		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
+		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
+		    SOCKNAL_MATCH_NO)
 				continue;
 
 		list_del(&tx->tx_list);
@@ -1286,7 +1296,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
 	 *	socket callbacks.
 	 */
 
-	CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
+	CDEBUG(D_NET,
+	       "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
 	       libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
 	       &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
 	       conn->ksnc_port, incarnation, cpt,
@@ -1457,8 +1468,8 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
 			spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
 		}
 
-		peer->ksnp_proto = NULL;	/* renegotiate protocol version */
-		peer->ksnp_error = error;       /* stash last conn close reason */
+		peer->ksnp_proto = NULL;  /* renegotiate protocol version */
+		peer->ksnp_error = error; /* stash last conn close reason */
 
 		if (list_empty(&peer->ksnp_routes)) {
 			/* I've just closed last conn belonging to a
@@ -1635,7 +1646,8 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
 	case SOCKNAL_RX_LNET_PAYLOAD:
 		last_rcv = conn->ksnc_rx_deadline -
 			   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
-		CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
+		CERROR(
+		       "Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
 		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
 		       &conn->ksnc_ipaddr, conn->ksnc_port,
 		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
@@ -1646,21 +1658,24 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
 		break;
 	case SOCKNAL_RX_LNET_HEADER:
 		if (conn->ksnc_rx_started)
-			CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
+			CERROR(
+			       "Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 			       &conn->ksnc_ipaddr, conn->ksnc_port,
 			       conn->ksnc_proto->pro_version);
 		break;
 	case SOCKNAL_RX_KSM_HEADER:
 		if (conn->ksnc_rx_started)
-			CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
+			CERROR(
+			       "Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 			       &conn->ksnc_ipaddr, conn->ksnc_port,
 			       conn->ksnc_proto->pro_version);
 		break;
 	case SOCKNAL_RX_SLOP:
 		if (conn->ksnc_rx_started)
-			CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
+			CERROR(
+			       "Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 			       &conn->ksnc_ipaddr, conn->ksnc_port);
 	       break;
@@ -1725,7 +1740,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
 	if (id.nid != LNET_NID_ANY)
-		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+		lo = hi = (int)(ksocknal_nid2peerlist(id.nid) -
+				ksocknal_data.ksnd_peers);
 	else {
 		lo = 0;
 		hi = ksocknal_data.ksnd_peer_hash_size - 1;
@@ -1737,11 +1753,14 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
 
 			peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 
-			if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
-			      (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
+			if (!((id.nid == LNET_NID_ANY ||
+			       id.nid == peer->ksnp_id.nid) &&
+			      (id.pid == LNET_PID_ANY ||
+			       id.pid == peer->ksnp_id.pid)))
 				continue;
 
-			count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
+			count += ksocknal_close_peer_conns_locked(peer,
+								  ipaddr, 0);
 		}
 	}
 
@@ -1805,7 +1824,8 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 			if (bufnob < conn->ksnc_tx_bufnob) {
 				/* something got ACKed */
 				conn->ksnc_tx_deadline =
-					cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+					cfs_time_shift(
+					    *ksocknal_tunables.ksnd_timeout);
 				peer->ksnp_last_alive = now;
 				conn->ksnc_tx_bufnob = bufnob;
 			}
@@ -1838,7 +1858,6 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 		ksocknal_launch_all_connections_locked(peer);
 
 	write_unlock_bh(glock);
-	return;
 }
 
 static void
@@ -1962,7 +1981,8 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 						      ksnp_list);
 
 				for (j = 0; j < peer->ksnp_n_passive_ips; j++)
-					if (peer->ksnp_passive_ips[j] == ipaddress)
+					if (peer->ksnp_passive_ips[j] ==
+					    ipaddress)
 						iface->ksni_npeers++;
 
 				list_for_each(rtmp, &peer->ksnp_routes) {
@@ -1977,7 +1997,8 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 		}
 
 		rc = 0;
-		/* NB only new connections will pay attention to the new interface! */
+		/* NB only new connections will pay attention to the new
+		 * interface! */
 	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2064,7 +2085,8 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
 				if (peer->ksnp_ni != ni)
 					continue;
 
-				ksocknal_peer_del_interface_locked(peer, this_ip);
+				ksocknal_peer_del_interface_locked(peer,
+								   this_ip);
 			}
 		}
 	}
@@ -2111,7 +2133,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 
 	case IOC_LIBCFS_DEL_INTERFACE:
 		return ksocknal_del_interface(ni,
-					      data->ioc_u32[0]); /* IP address */
+					      data->ioc_u32[0]);/* IP address */
 
 	case IOC_LIBCFS_GET_PEER: {
 		__u32 myip = 0;
@@ -2153,7 +2175,8 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 		int txmem;
 		int rxmem;
 		int nagle;
-		ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+		ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni,
+							      data->ioc_count);
 
 		if (conn == NULL)
 			return -ENOENT;
@@ -2263,9 +2286,8 @@ ksocknal_base_shutdown(void)
 	case SOCKNAL_INIT_ALL:
 	case SOCKNAL_INIT_DATA:
 		LASSERT(ksocknal_data.ksnd_peers != NULL);
-		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+		for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
 			LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
-		}
 
 		LASSERT(list_empty(&ksocknal_data.ksnd_nets));
 		LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
@@ -2315,7 +2337,8 @@ ksocknal_base_shutdown(void)
 		read_lock(&ksocknal_data.ksnd_global_lock);
 		while (ksocknal_data.ksnd_nthreads != 0) {
 			i++;
-			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+			/* power of 2? */
+			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 			       "waiting for %d threads to terminate\n",
 				ksocknal_data.ksnd_nthreads);
 			read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2446,6 +2469,7 @@ ksocknal_base_startup(void)
 
 	for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
 		char name[16];
+
 		spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 		ksocknal_data.ksnd_connd_starting++;
 		spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
@@ -2503,7 +2527,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 		ksock_route_t *route;
 		ksock_conn_t  *conn;
 
-		CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
+		CWARN(
+		      "Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
 		      libcfs_id2str(peer->ksnp_id),
 		      atomic_read(&peer->ksnp_refcount),
 		      peer->ksnp_sharecount, peer->ksnp_closing,
@@ -2514,7 +2539,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 
 		list_for_each(tmp, &peer->ksnp_routes) {
 			route = list_entry(tmp, ksock_route_t, ksnr_list);
-			CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
+			CWARN(
+			      "Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
 			      atomic_read(&route->ksnr_refcount),
 			      route->ksnr_scheduled, route->ksnr_connecting,
 			      route->ksnr_connected, route->ksnr_deleted);
@@ -2530,7 +2556,6 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 	}
 
 	read_unlock(&ksocknal_data.ksnd_global_lock);
-	return;
 }
 
 void
@@ -2711,6 +2736,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
 		long id;
 		char name[20];
 		ksock_sched_t *sched;
+
 		id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
 		sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
 		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index c34378c..d270263 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -139,7 +139,7 @@ typedef struct {
 typedef struct {
 	__u64		  ksnn_incarnation;	/* my epoch */
 	spinlock_t	  ksnn_lock;		/* serialise */
-	struct list_head	  ksnn_list;		/* chain on global list */
+	struct list_head  ksnn_list;		/* chain on global list */
 	int		  ksnn_npeers;		/* # peers */
 	int		  ksnn_shutdown;	/* shutting down? */
 	int		  ksnn_ninterfaces;	/* IP interfaces */
@@ -474,7 +474,8 @@ ksocknal_route_mask(void)
 static inline struct list_head *
 ksocknal_nid2peerlist(lnet_nid_t nid)
 {
-	unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
+	unsigned int hash = ((unsigned int)nid) %
+			    ksocknal_data.ksnd_peer_hash_size;
 
 	return &ksocknal_data.ksnd_peers[hash];
 }
@@ -587,8 +588,10 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 		  unsigned int offset, unsigned int mlen, unsigned int rlen);
 int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
 
-extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
+extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip,
+			     int port);
+extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni,
+					       lnet_process_id_t id);
 extern ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
 extern void ksocknal_peer_failed(ksock_peer_t *peer);
 extern int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
@@ -613,11 +616,13 @@ extern void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
 extern void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
 				  int error);
 extern void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid,
+			   unsigned long *when);
 extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
 extern void ksocknal_thread_fini(void);
 extern void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
+extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t
+							     *peer);
 extern ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
 extern int ksocknal_new_packet(ksock_conn_t *conn, int skip);
 extern int ksocknal_scheduler(void *arg);
@@ -634,7 +639,8 @@ extern void ksocknal_write_callback(ksock_conn_t *conn);
 extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
 extern void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
 extern void ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn);
-extern void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
+extern void ksocknal_lib_reset_callback(struct socket *sock,
+					ksock_conn_t *conn);
 extern void ksocknal_lib_push_conn(ksock_conn_t *conn);
 extern int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
 extern int ksocknal_lib_setup_sock(struct socket *so);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index a1a4ac0..38c85ae 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -38,8 +38,8 @@ ksocknal_alloc_tx(int type, int size)
 		spin_lock(&ksocknal_data.ksnd_tx_lock);
 
 		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
-					    next, ksock_tx_t, tx_list);
+			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+					ksock_tx_t, tx_list);
 			LASSERT(tx->tx_desc_size == size);
 			list_del(&tx->tx_list);
 		}
@@ -91,7 +91,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 
 
 void
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(ksock_tx_t *tx)
 {
 	atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
@@ -108,7 +108,7 @@ ksocknal_free_tx (ksock_tx_t *tx)
 }
 
 static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	struct kvec *iov = tx->tx_iov;
 	int nob;
@@ -123,7 +123,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 		return rc;
 
 	nob = rc;
-	LASSERT (nob <= tx->tx_resid);
+	LASSERT(nob <= tx->tx_resid);
 	tx->tx_resid -= nob;
 
 	/* "consume" iov */
@@ -145,7 +145,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	lnet_kiov_t *kiov = tx->tx_kiov;
 	int nob;
@@ -161,7 +161,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 		return rc;
 
 	nob = rc;
-	LASSERT (nob <= tx->tx_resid);
+	LASSERT(nob <= tx->tx_resid);
 	tx->tx_resid -= nob;
 
 	/* "consume" kiov */
@@ -183,7 +183,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	int rc;
 	int bufnob;
@@ -197,7 +197,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 
 	rc = ksocknal_connsock_addref(conn);
 	if (rc != 0) {
-		LASSERT (conn->ksnc_closing);
+		LASSERT(conn->ksnc_closing);
 		return -ESHUTDOWN;
 	}
 
@@ -207,9 +207,9 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 			ksocknal_data.ksnd_enomem_tx--;
 			rc = -EAGAIN;
 		} else if (tx->tx_niov != 0) {
-			rc = ksocknal_send_iov (conn, tx);
+			rc = ksocknal_send_iov(conn, tx);
 		} else {
-			rc = ksocknal_send_kiov (conn, tx);
+			rc = ksocknal_send_kiov(conn, tx);
 		}
 
 		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
@@ -228,7 +228,8 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 
 		if (rc <= 0) { /* Didn't write anything? */
 
-			if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+			/* some stacks return 0 instead of -EAGAIN */
+			if (rc == 0)
 				rc = -EAGAIN;
 
 			/* Check if EAGAIN is due to memory pressure */
@@ -239,7 +240,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 		}
 
 		/* socket's wmem_queued now includes 'rc' bytes */
-		atomic_sub (rc, &conn->ksnc_tx_nob);
+		atomic_sub(rc, &conn->ksnc_tx_nob);
 		rc = 0;
 
 	} while (tx->tx_resid != 0);
@@ -249,7 +250,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(ksock_conn_t *conn)
 {
 	struct kvec *iov = conn->ksnc_rx_iov;
 	int nob;
@@ -294,11 +295,12 @@ ksocknal_recv_iov (ksock_conn_t *conn)
 }
 
 static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(ksock_conn_t *conn)
 {
 	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
 	int nob;
 	int rc;
+
 	LASSERT(conn->ksnc_rx_nkiov > 0);
 
 	/* Never touch conn->ksnc_rx_kiov or change connection
@@ -338,7 +340,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
 }
 
 static int
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(ksock_conn_t *conn)
 {
 	/* Return 1 on success, 0 on EOF, < 0 on error.
 	 * Caller checks ksnc_rx_nob_wanted to determine
@@ -352,15 +354,15 @@ ksocknal_receive (ksock_conn_t *conn)
 
 	rc = ksocknal_connsock_addref(conn);
 	if (rc != 0) {
-		LASSERT (conn->ksnc_closing);
+		LASSERT(conn->ksnc_closing);
 		return -ESHUTDOWN;
 	}
 
 	for (;;) {
 		if (conn->ksnc_rx_niov != 0)
-			rc = ksocknal_recv_iov (conn);
+			rc = ksocknal_recv_iov(conn);
 		else
-			rc = ksocknal_recv_kiov (conn);
+			rc = ksocknal_recv_kiov(conn);
 
 		if (rc <= 0) {
 			/* error/EOF or partial receive */
@@ -386,7 +388,7 @@ ksocknal_receive (ksock_conn_t *conn)
 }
 
 void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
 {
 	lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
 	int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
@@ -399,25 +401,30 @@ ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
 	if (ni == NULL && tx->tx_conn != NULL)
 		ni = tx->tx_conn->ksnc_peer->ksnp_ni;
 
-	ksocknal_free_tx (tx);
+	ksocknal_free_tx(tx);
 	if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
-		lnet_finalize (ni, lnetmsg, rc);
+		lnet_finalize(ni, lnetmsg, rc);
 }
 
 void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
 {
 	ksock_tx_t *tx;
 
-	while (!list_empty (txlist)) {
+	while (!list_empty(txlist)) {
 		tx = list_entry(txlist->next, ksock_tx_t, tx_list);
 
 		if (error && tx->tx_lnetmsg != NULL) {
 			CNETERR("Deleting packet type %d len %d %s->%s\n",
-				le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
-				le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
-				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
-				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
+				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
+				le32_to_cpu(
+					tx->tx_lnetmsg->msg_hdr.payload_length),
+				libcfs_nid2str(
+					le64_to_cpu(
+					tx->tx_lnetmsg->msg_hdr.src_nid)),
+				libcfs_nid2str(
+					le64_to_cpu(
+					tx->tx_lnetmsg->msg_hdr.dest_nid)));
 		} else if (error) {
 			CNETERR("Deleting noop packet\n");
 		}
@@ -500,20 +507,20 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 }
 
 static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	int rc;
 
 	if (tx->tx_zc_capable && !tx->tx_zc_checked)
 		ksocknal_check_zc_req(tx);
 
-	rc = ksocknal_transmit (conn, tx);
+	rc = ksocknal_transmit(conn, tx);
 
 	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
 
 	if (tx->tx_resid == 0) {
 		/* Sent everything OK */
-		LASSERT (rc == 0);
+		LASSERT(rc == 0);
 
 		return 0;
 	}
@@ -533,13 +540,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
 		/* enomem list takes over scheduler's ref... */
-		LASSERT (conn->ksnc_tx_scheduled);
+		LASSERT(conn->ksnc_tx_scheduled);
 		list_add_tail(&conn->ksnc_tx_list,
 				  &ksocknal_data.ksnd_enomem_conns);
 		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
 						   SOCKNAL_ENOMEM_RETRY),
 				   ksocknal_data.ksnd_reaper_waketime))
-			wake_up (&ksocknal_data.ksnd_reaper_waitq);
+			wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
 		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 		return rc;
@@ -551,11 +558,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 	if (!conn->ksnc_closing) {
 		switch (rc) {
 		case -ECONNRESET:
-			LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
+			LCONSOLE_WARN(
+				      "Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
 				      &conn->ksnc_ipaddr);
 			break;
 		default:
-			LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
+			LCONSOLE_WARN(
+				      "There was an unexpected network error while writing to %pI4h: %d.\n",
 				      &conn->ksnc_ipaddr, rc);
 			break;
 		}
@@ -570,14 +579,14 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 		ksocknal_uncheck_zc_req(tx);
 
 	/* it's not an error if conn is being closed */
-	ksocknal_close_conn_and_siblings (conn,
-					  (conn->ksnc_closing) ? 0 : rc);
+	ksocknal_close_conn_and_siblings(conn,
+					 (conn->ksnc_closing) ? 0 : rc);
 
 	return rc;
 }
 
 static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(ksock_route_t *route)
 {
 
 	/* called holding write lock on ksnd_global_lock */
@@ -599,7 +608,7 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
 }
 
 void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
 {
 	ksock_route_t *route;
 
@@ -624,10 +633,10 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 	int tnob = 0;
 	int fnob = 0;
 
-	list_for_each (tmp, &peer->ksnp_conns) {
+	list_for_each(tmp, &peer->ksnp_conns) {
 		ksock_conn_t *c  = list_entry(tmp, ksock_conn_t, ksnc_list);
 		int nob = atomic_read(&c->ksnc_tx_nob) +
-                                      c->ksnc_sock->sk->sk_wmem_queued;
+				      c->ksnc_sock->sk->sk_wmem_queued;
 		int rc;
 
 		LASSERT(!c->ksnc_closing);
@@ -644,8 +653,10 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 
 		case SOCKNAL_MATCH_YES: /* typed connection */
 			if (typed == NULL || tnob > nob ||
-			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			    (tnob == nob &&
+			     *ksocknal_tunables.ksnd_round_robin &&
+			     cfs_time_after(typed->ksnc_tx_last_post,
+			     c->ksnc_tx_last_post))) {
 				typed = c;
 				tnob  = nob;
 			}
@@ -653,8 +664,10 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 
 		case SOCKNAL_MATCH_MAY: /* fallback connection */
 			if (fallback == NULL || fnob > nob ||
-			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			    (fnob == nob &&
+			     *ksocknal_tunables.ksnd_round_robin &&
+			     cfs_time_after(fallback->ksnc_tx_last_post,
+			     c->ksnc_tx_last_post))) {
 				fallback = c;
 				fnob = nob;
 			}
@@ -676,13 +689,13 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
 {
 	conn->ksnc_proto->pro_pack(tx);
 
-	atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
 	ksocknal_conn_addref(conn); /* +1 ref for tx */
 	tx->tx_conn = conn;
 }
 
 void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
 {
 	ksock_sched_t *sched = conn->ksnc_scheduler;
 	ksock_msg_t *msg = &tx->tx_msg;
@@ -708,14 +721,14 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 	 *
 	 * We always expect at least 1 mapped fragment containing the
 	 * complete ksocknal message header. */
-	LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
 		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
 		(unsigned int)tx->tx_nob);
 	LASSERT(tx->tx_niov >= 1);
 	LASSERT(tx->tx_resid == tx->tx_nob);
 
-	CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
-		tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
+	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+		tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
 					       KSOCK_MSG_NOOP,
 		tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
 
@@ -743,7 +756,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
 
 		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
-			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
+			ztx = tx; /* ZC ACK piggybacked on ztx release
+				   * tx later */
 
 	} else {
 		/* It's a normal packet - can it piggback a noop zc-ack that
@@ -756,7 +770,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 	}
 
 	if (ztx != NULL) {
-		atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
 		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
 	}
 
@@ -764,10 +778,10 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
 		/* +1 ref for scheduler */
 		ksocknal_conn_addref(conn);
-		list_add_tail (&conn->ksnc_tx_list,
+		list_add_tail(&conn->ksnc_tx_list,
 				   &sched->kss_tx_conns);
 		conn->ksnc_tx_scheduled = 1;
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 
 	spin_unlock_bh(&sched->kss_lock);
@@ -775,18 +789,18 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
 
 
 ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
 {
 	unsigned long now = cfs_time_current();
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each (tmp, &peer->ksnp_routes) {
-		route = list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
-		if (route->ksnr_scheduled)      /* connections being established */
+		if (route->ksnr_scheduled) /* connections being established */
 			continue;
 
 		/* all route types connected ? */
@@ -811,13 +825,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
 }
 
 ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
 {
 	struct list_head *tmp;
 	ksock_route_t *route;
 
-	list_for_each (tmp, &peer->ksnp_routes) {
-		route = list_entry (tmp, ksock_route_t, ksnr_list);
+	list_for_each(tmp, &peer->ksnp_routes) {
+		route = list_entry(tmp, ksock_route_t, ksnr_list);
 
 		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -829,7 +843,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
 }
 
 int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 {
 	ksock_peer_t *peer;
 	ksock_conn_t *conn;
@@ -845,13 +859,15 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 		read_lock(g_lock);
 		peer = ksocknal_find_peer_locked(ni, id);
 		if (peer != NULL) {
-			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
-				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+			if (ksocknal_find_connectable_route_locked(peer) ==
+			    NULL) {
+				conn = ksocknal_find_conn_locked(peer, tx,
+								 tx->tx_nonblk);
 				if (conn != NULL) {
 					/* I've got no routes that need to be
 					 * connecting and I do have an actual
 					 * connection... */
-					ksocknal_queue_tx_locked (tx, conn);
+					ksocknal_queue_tx_locked(tx, conn);
 					read_unlock(g_lock);
 					return 0;
 				}
@@ -870,7 +886,8 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 		write_unlock_bh(g_lock);
 
 		if ((id.pid & LNET_PID_USERFLAG) != 0) {
-			CERROR("Refusing to create a connection to userspace process %s\n",
+			CERROR(
+			       "Refusing to create a connection to userspace process %s\n",
 			       libcfs_id2str(id));
 			return -EHOSTUNREACH;
 		}
@@ -895,19 +912,19 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
 	if (conn != NULL) {
 		/* Connection exists; queue message on it */
-		ksocknal_queue_tx_locked (tx, conn);
+		ksocknal_queue_tx_locked(tx, conn);
 		write_unlock_bh(g_lock);
 		return 0;
 	}
 
 	if (peer->ksnp_accepting > 0 ||
-	    ksocknal_find_connecting_route_locked (peer) != NULL) {
+	    ksocknal_find_connecting_route_locked(peer) != NULL) {
 		/* the message is going to be pinned to the peer */
 		tx->tx_deadline =
 			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
 		/* Queue the message until a connection is established */
-		list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
 		write_unlock_bh(g_lock);
 		return 0;
 	}
@@ -943,8 +960,8 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 	LASSERT(payload_nob == 0 || payload_niov > 0);
 	LASSERT(payload_niov <= LNET_MAX_IOV);
 	/* payload is either all vaddrs or all pages */
-	LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-	LASSERT (!in_interrupt ());
+	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
+	LASSERT(!in_interrupt());
 
 	if (payload_iov != NULL)
 		desc_size = offsetof(ksock_tx_t,
@@ -1016,7 +1033,7 @@ ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 }
 
 void
-ksocknal_thread_fini (void)
+ksocknal_thread_fini(void)
 {
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 	ksocknal_data.ksnd_nthreads--;
@@ -1024,7 +1041,7 @@ ksocknal_thread_fini (void)
 }
 
 int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
 {
 	static char ksocknal_slop_buffer[4096];
 
@@ -1047,12 +1064,14 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 		case  KSOCK_PROTO_V2:
 		case  KSOCK_PROTO_V3:
 			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
-			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
+			conn->ksnc_rx_iov =
+					(struct kvec *)&conn->ksnc_rx_iov_space;
 			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
 
 			conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
 			conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
-			conn->ksnc_rx_iov[0].iov_len  = offsetof(ksock_msg_t, ksm_u);
+			conn->ksnc_rx_iov[0].iov_len =
+					offsetof(ksock_msg_t, ksm_u);
 			break;
 
 		case KSOCK_PROTO_V1:
@@ -1061,13 +1080,15 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 			conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
 			conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
 
-			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
-			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
-			conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+			conn->ksnc_rx_iov =
+					(struct kvec *)&conn->ksnc_rx_iov_space;
+			conn->ksnc_rx_iov[0].iov_base =
+					&conn->ksnc_msg.ksm_u.lnetmsg;
+			conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
 			break;
 
 		default:
-			LBUG ();
+			LBUG();
 		}
 		conn->ksnc_rx_niov = 1;
 
@@ -1093,10 +1114,10 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 		conn->ksnc_rx_iov[niov].iov_len  = nob;
 		niov++;
 		skipped += nob;
-		nob_to_skip -=nob;
+		nob_to_skip -= nob;
 
 	} while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
-		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
 
 	conn->ksnc_rx_niov = niov;
 	conn->ksnc_rx_kiov = NULL;
@@ -1106,13 +1127,13 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 }
 
 static int
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(ksock_conn_t *conn)
 {
 	lnet_hdr_t *lhdr;
 	lnet_process_id_t *id;
 	int rc;
 
-	LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
 
 	/* NB: sched lock NOT held */
 	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
@@ -1125,24 +1146,27 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		rc = ksocknal_receive(conn);
 
 		if (rc <= 0) {
-			LASSERT (rc != -EAGAIN);
+			LASSERT(rc != -EAGAIN);
 
 			if (rc == 0)
-				CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
+				CDEBUG(D_NET,
+				       "[%p] EOF from %s ip %pI4h:%d\n",
 				       conn,
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 				       &conn->ksnc_ipaddr,
 				       conn->ksnc_port);
 			else if (!conn->ksnc_closing)
-				CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
+				CERROR(
+				       "[%p] Error %d on read from %s ip %pI4h:%d\n",
 				       conn, rc,
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
 				       &conn->ksnc_ipaddr,
 				       conn->ksnc_port);
 
 			/* it's not an error if conn is being closed */
-			ksocknal_close_conn_and_siblings (conn,
-							  (conn->ksnc_closing) ? 0 : rc);
+			ksocknal_close_conn_and_siblings(conn,
+							 (conn->ksnc_closing) ?
+							 0 : rc);
 			return (rc == 0 ? -ESHUTDOWN : rc);
 		}
 
@@ -1185,18 +1209,20 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
 			__u64 cookie = 0;
 
-			LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
+			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
 
 			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
 				cookie = conn->ksnc_msg.ksm_zc_cookies[0];
 
 			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
-					       conn->ksnc_msg.ksm_zc_cookies[1]);
+				       conn->ksnc_msg.ksm_zc_cookies[1]);
 
 			if (rc != 0) {
-				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
+				CERROR(
+				       "%s: Unknown ZC-ACK cookie: %llu, %llu\n",
 				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
-				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
+				       cookie,
+				       conn->ksnc_msg.ksm_zc_cookies[1]);
 				ksocknal_new_packet(conn, 0);
 				ksocknal_close_conn_and_siblings(conn, -EPROTO);
 				return rc;
@@ -1204,7 +1230,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		}
 
 		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
-			ksocknal_new_packet (conn, 0);
+			ksocknal_new_packet(conn, 0);
 			return 0;       /* NOOP is done and just return */
 		}
 
@@ -1245,14 +1271,14 @@ ksocknal_process_receive (ksock_conn_t *conn)
 		if (rc < 0) {
 			/* I just received garbage: give up on this conn */
 			ksocknal_new_packet(conn, 0);
-			ksocknal_close_conn_and_siblings (conn, rc);
+			ksocknal_close_conn_and_siblings(conn, rc);
 			ksocknal_conn_decref(conn);
 			return -EPROTO;
 		}
 
 		/* I'm racing with ksocknal_recv() */
-		LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
-			 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
 
 		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
 			return 0;
@@ -1289,14 +1315,14 @@ ksocknal_process_receive (ksock_conn_t *conn)
 
 		if (rc != 0) {
 			ksocknal_new_packet(conn, 0);
-			ksocknal_close_conn_and_siblings (conn, rc);
+			ksocknal_close_conn_and_siblings(conn, rc);
 			return -EPROTO;
 		}
 		/* Fall through */
 
 	case SOCKNAL_RX_SLOP:
 		/* starting new packet? */
-		if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
 			return 0;       /* come back later */
 		goto again;	     /* try to finish reading slop now */
 
@@ -1310,7 +1336,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 }
 
 int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 	       unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
 	       unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
@@ -1351,8 +1377,8 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 	switch (conn->ksnc_rx_state) {
 	case SOCKNAL_RX_PARSE_WAIT:
 		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
-		wake_up (&sched->kss_waitq);
-		LASSERT (conn->ksnc_rx_ready);
+		wake_up(&sched->kss_waitq);
+		LASSERT(conn->ksnc_rx_ready);
 		break;
 
 	case SOCKNAL_RX_PARSE:
@@ -1410,7 +1436,7 @@ int ksocknal_scheduler(void *arg)
 
 		/* Ensure I progress everything semi-fairly */
 
-		if (!list_empty (&sched->kss_rx_conns)) {
+		if (!list_empty(&sched->kss_rx_conns)) {
 			conn = list_entry(sched->kss_rx_conns.next,
 					      ksock_conn_t, ksnc_rx_list);
 			list_del(&conn->ksnc_rx_list);
@@ -1443,7 +1469,7 @@ int ksocknal_scheduler(void *arg)
 				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
 			} else if (conn->ksnc_rx_ready) {
 				/* reschedule for rx */
-				list_add_tail (&conn->ksnc_rx_list,
+				list_add_tail(&conn->ksnc_rx_list,
 						   &sched->kss_rx_conns);
 			} else {
 				conn->ksnc_rx_scheduled = 0;
@@ -1454,7 +1480,7 @@ int ksocknal_scheduler(void *arg)
 			did_something = 1;
 		}
 
-		if (!list_empty (&sched->kss_tx_conns)) {
+		if (!list_empty(&sched->kss_tx_conns)) {
 			LIST_HEAD(zlist);
 
 			if (!list_empty(&sched->kss_zombie_noop_txs)) {
@@ -1465,7 +1491,7 @@ int ksocknal_scheduler(void *arg)
 
 			conn = list_entry(sched->kss_tx_conns.next,
 					      ksock_conn_t, ksnc_tx_list);
-			list_del (&conn->ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);
 
 			LASSERT(conn->ksnc_tx_scheduled);
 			LASSERT(conn->ksnc_tx_ready);
@@ -1496,7 +1522,8 @@ int ksocknal_scheduler(void *arg)
 			rc = ksocknal_process_transmit(conn, tx);
 
 			if (rc == -ENOMEM || rc == -EAGAIN) {
-				/* Incomplete send: replace tx on HEAD of tx_queue */
+				/* Incomplete send: replace tx on HEAD of
+				 * tx_queue */
 				spin_lock_bh(&sched->kss_lock);
 				list_add(&tx->tx_list,
 					     &conn->ksnc_tx_queue);
@@ -1535,7 +1562,7 @@ int ksocknal_scheduler(void *arg)
 				rc = wait_event_interruptible_exclusive(
 					sched->kss_waitq,
 					!ksocknal_sched_cansleep(sched));
-				LASSERT (rc == 0);
+				LASSERT(rc == 0);
 			} else {
 				cond_resched();
 			}
@@ -1553,7 +1580,7 @@ int ksocknal_scheduler(void *arg)
  * Add connection to kss_rx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(ksock_conn_t *conn)
 {
 	ksock_sched_t *sched;
 
@@ -1570,7 +1597,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
 
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 	spin_unlock_bh(&sched->kss_lock);
 }
@@ -1579,7 +1606,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
  * Add connection to kss_tx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
 {
 	ksock_sched_t *sched;
 
@@ -1591,20 +1618,20 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 
 	if (!conn->ksnc_tx_scheduled && /* not being progressed */
 	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
-		list_add_tail (&conn->ksnc_tx_list,
+		list_add_tail(&conn->ksnc_tx_list,
 				   &sched->kss_tx_conns);
 		conn->ksnc_tx_scheduled = 1;
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
 
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 
 	spin_unlock_bh(&sched->kss_lock);
 }
 
 static ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
 {
 	__u32 version = 0;
 
@@ -1634,8 +1661,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
 	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
 		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
 
-		CLASSERT(sizeof (lnet_magicversion_t) ==
-			 offsetof (ksock_hello_msg_t, kshm_src_nid));
+		CLASSERT(sizeof(lnet_magicversion_t) ==
+			 offsetof(ksock_hello_msg_t, kshm_src_nid));
 
 		if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
 		    hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
@@ -1646,8 +1673,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
 }
 
 int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+		    lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
 {
 	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
 	ksock_net_t *net = (ksock_net_t *)ni->ni_data;
@@ -1684,9 +1711,9 @@ ksocknal_invert_type(int type)
 }
 
 int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-		     ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
-		     __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+		    ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+		    __u64 *incarnation)
 {
 	/* Return < 0	fatal error
 	 *	0	  success
@@ -1707,17 +1734,18 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 	timeout = active ? *ksocknal_tunables.ksnd_timeout :
 			    lnet_acceptor_timeout();
 
-	rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+	rc = libcfs_sock_read(sock, &hello->kshm_magic,
+			      sizeof(hello->kshm_magic), timeout);
 	if (rc != 0) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
 			rc, &conn->ksnc_ipaddr);
-		LASSERT (rc < 0);
+		LASSERT(rc < 0);
 		return rc;
 	}
 
 	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
 	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
-	    hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
 		/* Unexpected magic! */
 		CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
 		       __cpu_to_le32 (hello->kshm_magic),
@@ -1772,7 +1800,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 	*incarnation = hello->kshm_src_incarnation;
 
 	if (hello->kshm_src_nid == LNET_NID_ANY) {
-		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
+		CERROR(
+		       "Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
 		       &conn->ksnc_ipaddr);
 		return -EPROTO;
 	}
@@ -1781,7 +1810,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
 		/* Userspace NAL assigns peer process ID from socket */
 		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
-		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+					 conn->ksnc_ipaddr);
 	} else {
 		recv_id.nid = hello->kshm_src_nid;
 		recv_id.pid = hello->kshm_src_pid;
@@ -1804,7 +1834,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 
 	if (peerid->pid != recv_id.pid ||
 	    peerid->nid != recv_id.nid) {
-		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
+		LCONSOLE_ERROR_MSG(0x130,
+				   "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
 				   libcfs_id2str(*peerid),
 				   &conn->ksnc_ipaddr,
 				   libcfs_id2str(recv_id));
@@ -1828,7 +1859,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
 }
 
 static int
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(ksock_route_t *route)
 {
 	LIST_HEAD(zombies);
 	ksock_peer_t *peer = route->ksnr_peer;
@@ -1840,7 +1871,8 @@ ksocknal_connect (ksock_route_t *route)
 	int rc = 0;
 
 	deadline = cfs_time_add(cfs_time_current(),
-				cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
+				cfs_time_seconds(
+					*ksocknal_tunables.ksnd_timeout));
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -1864,7 +1896,8 @@ ksocknal_connect (ksock_route_t *route)
 		if (peer->ksnp_accepting > 0) {
 			CDEBUG(D_NET,
 			       "peer %s(%d) already connecting to me, retry later.\n",
-			       libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+			       libcfs_nid2str(peer->ksnp_id.nid),
+			       peer->ksnp_accepting);
 			retry_later = 1;
 		}
 
@@ -1878,7 +1911,7 @@ ksocknal_connect (ksock_route_t *route)
 		} else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
 			type = SOCKLND_CONN_BULK_IN;
 		} else {
-			LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+			LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
 			type = SOCKLND_CONN_BULK_OUT;
 		}
 
@@ -1930,9 +1963,10 @@ ksocknal_connect (ksock_route_t *route)
 			 * but the race is resolved quickly usually,
 			 * so min_reconnectms should be good heuristic */
 			route->ksnr_retry_interval =
-				cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
+				cfs_time_seconds(
+				*ksocknal_tunables.ksnd_min_reconnectms)/1000;
 			route->ksnr_timeout = cfs_time_add(cfs_time_current(),
-							   route->ksnr_retry_interval);
+						   route->ksnr_retry_interval);
 		}
 
 		ksocknal_launch_connection_locked(route);
@@ -1951,12 +1985,14 @@ ksocknal_connect (ksock_route_t *route)
 	route->ksnr_retry_interval *= 2;
 	route->ksnr_retry_interval =
 		max(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+		    cfs_time_seconds(
+			    *ksocknal_tunables.ksnd_min_reconnectms)/1000);
 	route->ksnr_retry_interval =
 		min(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+		    cfs_time_seconds(
+			    *ksocknal_tunables.ksnd_max_reconnectms)/1000);
 
-	LASSERT (route->ksnr_retry_interval != 0);
+	LASSERT(route->ksnr_retry_interval != 0);
 	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
 					   route->ksnr_retry_interval);
 
@@ -1967,10 +2003,10 @@ ksocknal_connect (ksock_route_t *route)
 
 		/* ksnp_tx_queue is queued on a conn on successful
 		 * connection for V1.x and V2.x */
-		if (!list_empty (&peer->ksnp_conns)) {
+		if (!list_empty(&peer->ksnp_conns)) {
 			conn = list_entry(peer->ksnp_conns.next,
 					      ksock_conn_t, ksnc_list);
-			LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
 		}
 
 		/* take all the blocked packets while I've got the lock and
@@ -2106,7 +2142,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
 	now = cfs_time_current();
 
 	/* connd_routes can contain both pending and ordinary routes */
-	list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
 				 ksnr_connd_list) {
 
 		if (route->ksnr_retry_interval == 0 ||
@@ -2122,7 +2158,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
 }
 
 int
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
 {
 	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
 	ksock_connreq_t *cr;
@@ -2159,8 +2195,8 @@ ksocknal_connd (void *arg)
 
 		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
 			/* Connection accepted by the listener */
-			cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
-					    next, ksock_connreq_t, ksncr_list);
+			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+					ksock_connreq_t, ksncr_list);
 
 			list_del(&cr->ksncr_list);
 			spin_unlock_bh(connd_lock);
@@ -2182,7 +2218,7 @@ ksocknal_connd (void *arg)
 			route = ksocknal_connd_get_route_locked(&timeout);
 		}
 		if (route != NULL) {
-			list_del (&route->ksnr_connd_list);
+			list_del(&route->ksnr_connd_list);
 			ksocknal_data.ksnd_connd_connecting++;
 			spin_unlock_bh(connd_lock);
 			dropped_lock = 1;
@@ -2190,7 +2226,8 @@ ksocknal_connd (void *arg)
 			if (ksocknal_connect(route)) {
 				/* consecutive retry */
 				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
-					CWARN("massive consecutive re-connecting to %pI4h\n",
+					CWARN(
+					      "massive consecutive re-connecting to %pI4h\n",
 					      &route->ksnr_ipaddr);
 					cons_retry = 0;
 				}
@@ -2216,7 +2253,8 @@ ksocknal_connd (void *arg)
 
 		/* Nothing to do for 'timeout'  */
 		set_current_state(TASK_INTERRUPTIBLE);
-		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+					 &wait);
 		spin_unlock_bh(connd_lock);
 
 		nloops = 0;
@@ -2233,15 +2271,16 @@ ksocknal_connd (void *arg)
 }
 
 static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn(ksock_peer_t *peer)
 {
 	/* We're called with a shared lock on ksnd_global_lock */
 	ksock_conn_t *conn;
 	struct list_head *ctmp;
 
-	list_for_each (ctmp, &peer->ksnp_conns) {
+	list_for_each(ctmp, &peer->ksnp_conns) {
 		int error;
-		conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+
+		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 
 		/* Don't need the {get,put}connsock dance to deref ksnc_sock */
 		LASSERT(!conn->ksnc_closing);
@@ -2254,19 +2293,22 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 
 			switch (error) {
 			case ECONNRESET:
-				CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
+				CNETERR(
+					"A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
 					libcfs_id2str(peer->ksnp_id),
 					&conn->ksnc_ipaddr,
 					conn->ksnc_port);
 				break;
 			case ETIMEDOUT:
-				CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
+				CNETERR(
+					"A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
 					libcfs_id2str(peer->ksnp_id),
 					&conn->ksnc_ipaddr,
 					conn->ksnc_port);
 				break;
 			default:
-				CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d\n",
+				CNETERR(
+					"An unexpected network error %d occurred with %s (%pI4h:%d\n",
 					error,
 					libcfs_id2str(peer->ksnp_id),
 					&conn->ksnc_ipaddr,
@@ -2282,7 +2324,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 				     conn->ksnc_rx_deadline)) {
 			/* Timed out incomplete incoming message */
 			ksocknal_conn_addref(conn);
-			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
+			CNETERR(
+				"Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
 				libcfs_id2str(peer->ksnp_id),
 				&conn->ksnc_ipaddr,
 				conn->ksnc_port,
@@ -2299,7 +2342,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 			/* Timed out messages queued for sending or
 			 * buffered in the socket's send buffer */
 			ksocknal_conn_addref(conn);
-			CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
+			CNETERR(
+				"Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
 				libcfs_id2str(peer->ksnp_id),
 				&conn->ksnc_ipaddr,
 				conn->ksnc_port);
@@ -2318,16 +2362,16 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
 
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-	while (!list_empty (&peer->ksnp_tx_queue)) {
-		tx = list_entry (peer->ksnp_tx_queue.next,
+	while (!list_empty(&peer->ksnp_tx_queue)) {
+		tx = list_entry(peer->ksnp_tx_queue.next,
 				     ksock_tx_t, tx_list);
 
 		if (!cfs_time_aftereq(cfs_time_current(),
 				      tx->tx_deadline))
 			break;
 
-		list_del (&tx->tx_list);
-		list_add_tail (&tx->tx_list, &stale_txs);
+		list_del(&tx->tx_list);
+		list_add_tail(&tx->tx_list, &stale_txs);
 	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2342,7 +2386,8 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 	ksock_conn_t *conn;
 	ksock_tx_t *tx;
 
-	if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+	/* last_alive will be updated by create_conn */
+	if (list_empty(&peer->ksnp_conns))
 		return 0;
 
 	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
@@ -2351,7 +2396,8 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
 	    time_before(cfs_time_current(),
 			cfs_time_add(peer->ksnp_last_alive,
-				     cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
+				     cfs_time_seconds(
+				     *ksocknal_tunables.ksnd_keepalive))))
 		return 0;
 
 	if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
@@ -2397,7 +2443,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
 
 
 static void
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
 {
 	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
 	ksock_peer_t *peer;
@@ -2420,12 +2466,12 @@ ksocknal_check_peer_timeouts (int idx)
 			goto again;
 		}
 
-		conn = ksocknal_find_timed_out_conn (peer);
+		conn = ksocknal_find_timed_out_conn(peer);
 
 		if (conn != NULL) {
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 
-			ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 
 			/* NB we won't find this one again, but we can't
 			 * just proceed with the next peer, since we dropped
@@ -2436,9 +2482,9 @@ ksocknal_check_peer_timeouts (int idx)
 
 		/* we can't process stale txs right here because we're
 		 * holding only shared lock */
-		if (!list_empty (&peer->ksnp_tx_queue)) {
+		if (!list_empty(&peer->ksnp_tx_queue)) {
			ksock_tx_t *tx =
-				list_entry (peer->ksnp_tx_queue.next,
+				list_entry(peer->ksnp_tx_queue.next,
 						ksock_tx_t, tx_list);
 
 			if (cfs_time_aftereq(cfs_time_current(),
@@ -2483,12 +2529,13 @@ ksocknal_check_peer_timeouts (int idx)
 		spin_unlock(&peer->ksnp_lock);
 		read_unlock(&ksocknal_data.ksnd_global_lock);
 
-		CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
+		CERROR(
+		       "Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
 		       n, libcfs_nid2str(peer->ksnp_id.nid), tx,
 		       cfs_duration_sec(cfs_time_current() - deadline),
 		       resid, conn->ksnc_sock->sk->sk_wmem_queued);
 
-		ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 		ksocknal_conn_decref(conn);
 		goto again;
 	}
@@ -2497,7 +2544,7 @@ ksocknal_check_peer_timeouts (int idx)
 }
 
 int
-ksocknal_reaper (void *arg)
+ksocknal_reaper(void *arg)
 {
 	wait_queue_t wait;
 	ksock_conn_t *conn;
@@ -2518,11 +2565,11 @@ ksocknal_reaper (void *arg)
 
 	while (!ksocknal_data.ksnd_shuttingdown) {
 
-		if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
-			conn = list_entry (ksocknal_data. \
-					       ksnd_deathrow_conns.next,
-					       ksock_conn_t, ksnc_list);
-			list_del (&conn->ksnc_list);
+		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+			conn = list_entry(
+					ksocknal_data.ksnd_deathrow_conns.next,
+					ksock_conn_t, ksnc_list);
+			list_del(&conn->ksnc_list);
 
 			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2533,10 +2580,10 @@ ksocknal_reaper (void *arg)
 			continue;
 		}
 
-		if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
-			conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
-					       next, ksock_conn_t, ksnc_list);
-			list_del (&conn->ksnc_list);
+		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+					  ksock_conn_t, ksnc_list);
+			list_del(&conn->ksnc_list);
 
 			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2546,7 +2593,7 @@ ksocknal_reaper (void *arg)
 			continue;
 		}
 
-		if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+		if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
 			list_add(&enomem_conns,
 				     &ksocknal_data.ksnd_enomem_conns);
 			list_del_init(&ksocknal_data.ksnd_enomem_conns);
@@ -2556,10 +2603,10 @@ ksocknal_reaper (void *arg)
 
 		/* reschedule all the connections that stalled with ENOMEM... */
 		nenomem_conns = 0;
-		while (!list_empty (&enomem_conns)) {
-			conn = list_entry (enomem_conns.next,
+		while (!list_empty(&enomem_conns)) {
+			conn = list_entry(enomem_conns.next,
 					       ksock_conn_t, ksnc_tx_list);
-			list_del (&conn->ksnc_tx_list);
+			list_del(&conn->ksnc_tx_list);
 
 			sched = conn->ksnc_scheduler;
 
@@ -2596,7 +2643,7 @@ ksocknal_reaper (void *arg)
 				chunk = 1;
 
 			for (i = 0; i < chunk; i++) {
-				ksocknal_check_peer_timeouts (peer_index);
+				ksocknal_check_peer_timeouts(peer_index);
 				peer_index = (peer_index + 1) %
 					     ksocknal_data.ksnd_peer_hash_size;
 			}
@@ -2613,16 +2660,16 @@ ksocknal_reaper (void *arg)
 		ksocknal_data.ksnd_reaper_waketime =
 			cfs_time_add(cfs_time_current(), timeout);
 
-		set_current_state (TASK_INTERRUPTIBLE);
-		add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
 		if (!ksocknal_data.ksnd_shuttingdown &&
-		    list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-		    list_empty (&ksocknal_data.ksnd_zombie_conns))
+		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+		    list_empty(&ksocknal_data.ksnd_zombie_conns))
 			schedule_timeout(timeout);
 
-		set_current_state (TASK_RUNNING);
-		remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 	}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index caeb347..d8f4960 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -176,7 +176,8 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 		    nob < tx->tx_resid)
 			msg.msg_flags |= MSG_MORE;
 
-		rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
+		rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov,
+				    niov, nob);
 
 		for (i = 0; i < niov; i++)
 			kunmap(kiov[i].kiov_page);
@@ -250,7 +251,8 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
 				fragnob = sum;
 
 			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
-							   iov[i].iov_base, fragnob);
+							   iov[i].iov_base,
+							   fragnob);
 		}
 		conn->ksnc_msg.ksm_csum = saved_csum;
 	}
@@ -286,7 +288,8 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
 
 	for (nob = i = 0; i < niov; i++) {
 		if ((kiov[i].kiov_offset != 0 && i > 0) ||
-		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+		    (kiov[i].kiov_offset + kiov[i].kiov_len !=
+		     PAGE_CACHE_SIZE && i < niov - 1))
 			return NULL;
 
 		pages[i] = kiov[i].kiov_page;
@@ -358,9 +361,9 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
 			LASSERT(i < niov);
 
 			/* Dang! have to kmap again because I have nowhere to
-                         * stash the mapped address.  But by doing it while the
-                         * page is still mapped, the kernel just bumps the map
-                         * count and returns me the address it stashed. */
+			 * stash the mapped address.  But by doing it while the
+			 * page is still mapped, the kernel just bumps the map
+			 * count and returns me the address it stashed. */
 			base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
 			fragnob = kiov[i].kiov_len;
 			if (fragnob > sum)
@@ -404,7 +407,8 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
 			base = kmap(tx->tx_kiov[i].kiov_page) +
 			       tx->tx_kiov[i].kiov_offset;
 
-			csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
+			csum = ksocknal_csum(csum, base,
+					     tx->tx_kiov[i].kiov_len);
 
 			kunmap(tx->tx_kiov[i].kiov_page);
 		}
@@ -423,7 +427,8 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
 }
 
 int
-ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem,
+			       int *nagle)
 {
 	struct socket *sock = conn->ksnc_sock;
 	int len;
@@ -666,7 +671,6 @@ ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn)
 	sock->sk->sk_user_data = conn;
 	sock->sk->sk_data_ready = ksocknal_data_ready;
 	sock->sk->sk_write_space = ksocknal_write_space;
-	return;
 }
 
 void
@@ -682,8 +686,6 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
 	 * on ksnd_global_lock (to serialise with me) and NOOP if
 	 * sk_user_data is NULL. */
 	sock->sk->sk_user_data = NULL;
-
-	return ;
 }
 
 int
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
index f556388..8524630 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
@@ -71,7 +71,7 @@ static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
 	return crc32_le(crc, p, len);
 #else
 	while (len-- > 0)
-		crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
+		crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff);
 	return crc;
 #endif
 }
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 1938d6a..7d767df 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -65,7 +65,8 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
 	} else {
 		conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
 						       ksock_tx_t, tx_list);
-		LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
+		LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
+			tx->tx_msg.ksm_type);
 	}
 }
 
@@ -198,16 +199,20 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 	}
 
 	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
-		/* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+		/* NOOP tx has only one ZC-ACK cookie, can carry at least
+		 * one more */
 		if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
-			tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
+			tx->tx_msg.ksm_zc_cookies[0] =
+				tx->tx_msg.ksm_zc_cookies[1];
 			tx->tx_msg.ksm_zc_cookies[1] = cookie;
 		} else {
 			tx->tx_msg.ksm_zc_cookies[0] = cookie;
 		}
 
-		if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
-			/* not likely to carry more ACKs, skip it to simplify logic */
+		if (tx->tx_msg.ksm_zc_cookies[0] -
+		    tx->tx_msg.ksm_zc_cookies[1] > 2) {
+			/* not likely to carry more ACKs, skip it to
+			 * simplify logic */
 			ksocknal_next_tx_carrier(conn);
 		}
 
@@ -241,7 +246,8 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 		}
 
 	} else {
-		/* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
+		/* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range
+		 * of cookies */
 		if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
 		    cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
 			CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -428,7 +434,8 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
 				     &peer->ksnp_zc_req_list, tx_zc_list) {
 		__u64 c = tx->tx_msg.ksm_zc_cookies[0];
 
-		if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+		if (c == cookie1 || c == cookie2 ||
+		    (cookie1 < c && c < cookie2)) {
 			tx->tx_msg.ksm_zc_cookies[0] = 0;
 			list_del(&tx->tx_zc_list);
 			list_add(&tx->tx_zc_list, &zlist);
@@ -507,9 +514,8 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
 	if (hello->kshm_nips == 0)
 		goto out;
 
-	for (i = 0; i < (int) hello->kshm_nips; i++) {
+	for (i = 0; i < (int) hello->kshm_nips; i++)
 		hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
-	}
 
 	rc = libcfs_sock_write(sock, hello->kshm_ips,
 			       hello->kshm_nips * sizeof(__u32),
@@ -544,7 +550,8 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
 		LNET_UNLOCK();
 	}
 
-	rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
+	rc = libcfs_sock_write(sock, hello,
+			       offsetof(ksock_hello_msg_t, kshm_ips),
 			       lnet_acceptor_timeout());
 
 	if (rc != 0) {
@@ -645,7 +652,8 @@ out:
 }
 
 static int
-ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello,
+		       int timeout)
 {
 	struct socket *sock = conn->ksnc_sock;
 	int rc;
@@ -658,7 +666,8 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
 
 	rc = libcfs_sock_read(sock, &hello->kshm_src_nid,
 			      offsetof(ksock_hello_msg_t, kshm_ips) -
-				       offsetof(ksock_hello_msg_t, kshm_src_nid),
+				       offsetof(ksock_hello_msg_t,
+						kshm_src_nid),
 			      timeout);
 	if (rc != 0) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
@@ -720,7 +729,8 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
 	tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
 	tx->tx_iov[0].iov_len  = sizeof(lnet_hdr_t);
 
-	tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+	tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len +
+				    sizeof(lnet_hdr_t);
 }
 
 static void
@@ -733,14 +743,18 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx)
 
 		tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
 		tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
-		tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+		tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) +
+					    tx->tx_lnetmsg->msg_len;
 	} else {
 		LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
 
-		tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
-		tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
+		tx->tx_iov[0].iov_len = offsetof(ksock_msg_t,
+						 ksm_u.lnetmsg.ksnm_hdr);
+		tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t,
+						     ksm_u.lnetmsg.ksnm_hdr);
 	}
-	/* Don't checksum before start sending, because packet can be piggybacked with ACK */
+	/* Don't checksum before start sending, because packet can be
+	 * piggybacked with ACK */
 }
 
 static void
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [PATCH 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (20 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups Mike Shuey
@ 2015-05-21 19:50 ` Mike Shuey
  2015-05-22 10:55   ` Dan Carpenter
  2015-05-22  9:21 ` [PATCH v4 00/13] staging: lustre: lnet: code cleanups Dan Carpenter
  22 siblings, 1 reply; 54+ messages in thread
From: Mike Shuey @ 2015-05-21 19:50 UTC (permalink / raw)
  To: oleg.drokin
  Cc: devel, gregkh, kernel-janitors, linux-kernel, HPDD-discuss,
	lustre-deve, Mike Shuey

Cleaning up more checkpatch.pl issues.

Signed-off-by: Mike Shuey <shuey@purdue.edu>
---
 drivers/staging/lustre/lnet/selftest/brw_test.c  |    3 +-
 drivers/staging/lustre/lnet/selftest/conctl.c    |    4 +-
 drivers/staging/lustre/lnet/selftest/conrpc.c    |   41 ++++++++--------
 drivers/staging/lustre/lnet/selftest/conrpc.h    |    7 ++-
 drivers/staging/lustre/lnet/selftest/console.c   |   33 ++++++------
 drivers/staging/lustre/lnet/selftest/console.h   |   14 +++--
 drivers/staging/lustre/lnet/selftest/framework.c |   46 ++++++++---------
 drivers/staging/lustre/lnet/selftest/module.c    |    3 +-
 drivers/staging/lustre/lnet/selftest/ping_test.c |    1 -
 drivers/staging/lustre/lnet/selftest/rpc.c       |   29 +++++------
 drivers/staging/lustre/lnet/selftest/selftest.h  |   58 ++++++++++------------
 drivers/staging/lustre/lnet/selftest/timer.h     |    2 +-
 12 files changed, 119 insertions(+), 122 deletions(-)

diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index de11f1b..fd3383f 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -463,7 +463,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
 		npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
 
 	} else {
-		npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >>
+		      PAGE_CACHE_SHIFT;
 	}
 
 	replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 1a7870e..86cb031 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -752,7 +752,8 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
 	/* have parameter, check if parameter length is valid */
 	if (args->lstio_tes_param != NULL &&
 	    (args->lstio_tes_param_len <= 0 ||
-	     args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+	     args->lstio_tes_param_len > PAGE_CACHE_SIZE -
+	     sizeof(lstcon_test_t)))
 		return -EINVAL;
 
 	LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -925,5 +926,4 @@ out:
 
 	return rc;
 }
-
 EXPORT_SYMBOL(lstcon_ioctl_entry);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index a1a4e08..c0ad353 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -455,14 +455,13 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
 		      lstcon_session_feats_check(trans->tas_features);
 	}
 
-	CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n",
+	CDEBUG(D_NET,
+	       "transaction %s : success %d, failure %d, total %d, RPC error(%d), Framework error(%d)\n",
 	       lstcon_rpc_trans_name(trans->tas_opc),
 	       lstcon_rpc_stat_success(stat, 0),
 	       lstcon_rpc_stat_failure(stat, 0),
 	       lstcon_rpc_stat_total(stat, 0),
 	       stat->trs_rpc_errno, stat->trs_fwk_errno);
-
-	return;
 }
 
 int
@@ -564,10 +563,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 			continue;
 		}
 
-		/* rpcs can be still not callbacked (even LNetMDUnlink is called)
-		 * because huge timeout for inaccessible network, don't make
-		 * user wait for them, just abandon them, they will be recycled
-		 * in callback */
+		/* rpcs can be still not callbacked (even LNetMDUnlink is
+		 * called) because huge timeout for inaccessible network,
+		 * don't make user wait for them, just abandon them, they
+		 * will be recycled in callback */
 
 		LASSERT(crpc->crp_status != 0);
 
@@ -591,8 +590,6 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 	       lstcon_rpc_trans_name(trans->tas_opc), count);
 
 	LIBCFS_FREE(trans, sizeof(*trans));
-
-	return;
 }
 
 int
@@ -947,9 +944,10 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
 	}
 
 	if (reply->msg_ses_feats != trans->tas_features) {
-		CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
-			 reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
-			 trans->tas_features);
+		CNETERR(
+			"Framework features %x from %s is different with features on this transaction: %x\n",
+			reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
+			trans->tas_features);
 		status = mksn_rep->mksn_status = EPROTO;
 	}
 
@@ -1077,8 +1075,6 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 
 	if (stat->trs_fwk_errno == 0)
 		stat->trs_fwk_errno = rc;
-
-	return;
 }
 
 int
@@ -1111,8 +1107,9 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 			continue;
 
 		if (rc < 0) {
-			CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
-					transop, rc);
+			CDEBUG(D_NET,
+			       "Condition error while creating RPC for transaction %d: %d\n",
+			       transop, rc);
 			break;
 		}
 
@@ -1278,7 +1275,8 @@ lstcon_rpc_pinger(void *arg)
 
 	CDEBUG(D_NET, "Ping %d nodes in session\n", count);
 
-	ptimer->stt_expires = (unsigned long)(get_seconds() + LST_PING_INTERVAL);
+	ptimer->stt_expires = (unsigned long)(get_seconds() +
+			      LST_PING_INTERVAL);
 	stt_add_timer(ptimer);
 
 	mutex_unlock(&console_session.ses_mutex);
@@ -1301,7 +1299,8 @@ lstcon_rpc_pinger_start(void)
 	}
 
 	ptimer = &console_session.ses_ping_timer;
-	ptimer->stt_expires = (unsigned long)(get_seconds() + LST_PING_INTERVAL);
+	ptimer->stt_expires = (unsigned long)(get_seconds() +
+			      LST_PING_INTERVAL);
 
 	stt_add_timer(ptimer);
 
@@ -1349,7 +1348,8 @@ lstcon_rpc_cleanup_wait(void)
 
 		mutex_unlock(&console_session.ses_mutex);
 
-		CWARN("Session is shutting down, waiting for termination of transactions\n");
+		CWARN(
+		      "Session is shutting down, waiting for termination of transactions\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(cfs_time_seconds(1));
 
@@ -1381,7 +1381,8 @@ lstcon_rpc_module_init(void)
 {
 	INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list);
 	console_session.ses_ping_timer.stt_func = lstcon_rpc_pinger;
-	console_session.ses_ping_timer.stt_data = &console_session.ses_ping_timer;
+	console_session.ses_ping_timer.stt_data =
+		&console_session.ses_ping_timer;
 
 	console_session.ses_ping = NULL;
 
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 7d33cf9..5afb679 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -54,7 +54,8 @@
 #define LST_TRANS_TIMEOUT       30
 #define LST_TRANS_MIN_TIMEOUT   3
 
-#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
+#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), \
+					LST_TRANS_TIMEOUT)
 
 #define LST_PING_INTERVAL       8
 
@@ -105,8 +106,8 @@ typedef struct lstcon_rpc_trans {
 
 #define LST_TRANS_STATQRY       0x21
 
-typedef int (* lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (* lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
+typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
+typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
 
 int  lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
 			unsigned version, lstcon_rpc_t **crpc);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index f47c8f2..ed9c38f 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -77,7 +77,8 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
 
 	LASSERT(id.nid != LNET_NID_ANY);
 
-	list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) {
+	list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx],
+			    ndl_hlink) {
 		if (ndl->ndl_node->nd_id.nid != id.nid ||
 		    ndl->ndl_node->nd_id.pid != id.pid)
 			continue;
@@ -253,9 +254,8 @@ lstcon_group_decref(lstcon_group_t *grp)
 
 	lstcon_group_drain(grp, 0);
 
-	for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+	for (i = 0; i < LST_NODE_HASHSIZE; i++)
 		LASSERT(list_empty(&grp->grp_ndl_hash[i]));
-	}
 
 	LIBCFS_FREE(grp, offsetof(lstcon_group_t,
 				  grp_ndl_hash[LST_NODE_HASHSIZE]));
@@ -308,7 +308,7 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
 {
 	list_del_init(&ndl->ndl_link);
 	lstcon_ndlink_release(ndl);
-	grp->grp_nnode --;
+	grp->grp_nnode--;
 }
 
 static void
@@ -320,13 +320,11 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
 
 	list_del(&ndl->ndl_hlink);
 	list_del(&ndl->ndl_link);
-	old->grp_nnode --;
+	old->grp_nnode--;
 
 	list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
 	list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
 	new->grp_nnode++;
-
-	return;
 }
 
 static void
@@ -527,7 +525,7 @@ lstcon_group_add(char *name)
 	lstcon_group_t *grp;
 	int rc;
 
-	rc = (lstcon_group_find(name, &grp) == 0)? -EEXIST: 0;
+	rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
 	if (rc != 0) {
 		/* find a group with same name */
 		lstcon_group_put(grp);
@@ -816,7 +814,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
 		LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
 
 	rc = copy_to_user(gents_p, gentp,
-			      sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
+			      sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
 
 	LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
 
@@ -847,7 +845,7 @@ lstcon_batch_add(char *name)
 	int i;
 	int rc;
 
-	rc = (lstcon_batch_find(name, &bat) == 0)? -EEXIST: 0;
+	rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
 	if (rc != 0) {
 		CDEBUG(D_NET, "Batch %s already exists\n", name);
 		return rc;
@@ -911,7 +909,7 @@ lstcon_batch_list(int index, int len, char *name_up)
 	list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
 		if (index-- == 0) {
 			return copy_to_user(name_up, bat->bat_name, len) ?
-			       -EFAULT: 0;
+			       -EFAULT : 0;
 		}
 	}
 
@@ -956,7 +954,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
 				  &test->tes_dst_grp->grp_ndl_list;
 
 	if (dents_up != NULL) {
-		rc = lstcon_nodes_getent((server ? srvlst: clilst),
+		rc = lstcon_nodes_getent((server ? srvlst : clilst),
 					 index_p, ndent_p, dents_up);
 		return rc;
 	}
@@ -1208,7 +1206,8 @@ again:
 
 		lstcon_rpc_trans_destroy(trans);
 		/* return if any error */
-		CDEBUG(D_NET, "Failed to add test %s, RPC error %d, framework error %d\n",
+		CDEBUG(D_NET,
+		       "Failed to add test %s, RPC error %d, framework error %d\n",
 		       transop == LST_TRANS_TSBCLIADD ? "client" : "server",
 		       lstcon_trans_stat()->trs_rpc_errno,
 		       lstcon_trans_stat()->trs_fwk_errno);
@@ -1683,7 +1682,7 @@ int
 lstcon_session_match(lst_sid_t sid)
 {
 	return (console_session.ses_id.ses_nid   == sid.ses_nid &&
-		console_session.ses_id.ses_stamp == sid.ses_stamp) ?  1: 0;
+		console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0;
 }
 
 static void
@@ -1884,7 +1883,8 @@ lstcon_session_feats_check(unsigned feats)
 	spin_unlock(&console_session.ses_rpc_lock);
 
 	if (rc != 0) {
-		CERROR("remote features %x do not match with session features %x of console\n",
+		CERROR(
+		       "remote features %x do not match with session features %x of console\n",
 		       feats, console_session.ses_features);
 	}
 
@@ -2083,9 +2083,8 @@ lstcon_console_fini(void)
 	LASSERT(list_empty(&console_session.ses_bat_list));
 	LASSERT(list_empty(&console_session.ses_trans_list));
 
-	for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+	for (i = 0; i < LST_NODE_HASHSIZE; i++)
 		LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
-	}
 
 	LIBCFS_FREE(console_session.ses_ndl_hash,
 		    sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index c4cf0ae..7638042 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -116,9 +116,11 @@ typedef struct lstcon_test {
 	int              tes_oneside;    /* one-sided test */
 	int              tes_concur;     /* concurrency */
 	int              tes_loop;       /* loop count */
-	int              tes_dist;       /* nodes distribution of target group */
+	int              tes_dist;       /* nodes distribution of target
+					  * group */
 	int              tes_span;       /* nodes span of target group */
-	int              tes_cliidx;     /* client index, used for RPC creating */
+	int              tes_cliidx;     /* client index, used for RPC
+					  * creating */
 
 	struct list_head tes_trans_list; /* transaction list */
 	lstcon_group_t   *tes_src_grp;   /* group run the test */
@@ -177,7 +179,7 @@ lstcon_trans_stat(void)
 }
 
 static inline struct list_head *
-lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
+lstcon_id2hash(lnet_process_id_t id, struct list_head *hash)
 {
 	unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
 
@@ -191,7 +193,8 @@ extern int lstcon_session_match(lst_sid_t sid);
 extern int lstcon_session_new(char *name, int key, unsigned version,
 			      int timeout, int flags, lst_sid_t *sid_up);
 extern int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
-			       lstcon_ndlist_ent_t *entp, char *name_up, int len);
+			       lstcon_ndlist_ent_t *entp, char *name_up,
+			       int len);
 extern int lstcon_session_end(void);
 extern int lstcon_session_debug(int timeout, struct list_head *result_up);
 extern int lstcon_session_feats_check(unsigned feats);
@@ -210,7 +213,8 @@ extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
 extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
 			       struct list_head *result_up);
 extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
-			     int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
+			     int *index_p, int *ndent_p,
+			     lstcon_node_ent_t *ndents_up);
 extern int lstcon_group_list(int idx, int len, char *name_up);
 extern int lstcon_batch_add(char *name);
 extern int lstcon_batch_run(char *name, int timeout,
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 7c5185a..8d948e2 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -173,7 +173,6 @@ sfw_add_session_timer(void)
 	timer->stt_expires = cfs_time_add(sn->sn_timeout,
 					  get_seconds());
 	stt_add_timer(timer);
-	return;
 }
 
 static int
@@ -203,7 +202,8 @@ sfw_deactivate_session(void)
 	sfw_batch_t *tsb;
 	sfw_test_case_t *tsc;
 
-	if (sn == NULL) return;
+	if (sn == NULL)
+		return;
 
 	LASSERT(!sn->sn_timer_active);
 
@@ -298,7 +298,6 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
 
 	if (rpc->srpc_bulk != NULL)
 		sfw_free_pages(rpc);
-	return;
 }
 
 static void
@@ -573,7 +572,8 @@ sfw_load_test(struct sfw_test_instance *tsi)
 
 	rc = srpc_service_add_buffers(svc, nbuf);
 	if (rc != 0) {
-		CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
+		CWARN(
+		      "Failed to reserve enough buffers: service %s, %d needed: %d\n",
 		      svc->sv_name, nbuf, rc);
 		/* NB: this error handler is not strictly correct, because
 		 * it may release more buffers than already allocated,
@@ -604,7 +604,6 @@ sfw_unload_test(struct sfw_test_instance *tsi)
 	 * some buffers behind, but never mind... */
 	srpc_service_remove_buffers(tsc->tsc_srv_service,
 				    sfw_test_buffers(tsi));
-	return;
 }
 
 static void
@@ -613,7 +612,8 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
 	srpc_client_rpc_t *rpc;
 	sfw_test_unit_t *tsu;
 
-	if (!tsi->tsi_is_client) goto clean;
+	if (!tsi->tsi_is_client)
+		goto clean;
 
 	tsi->tsi_ops->tso_fini(tsi);
 
@@ -638,7 +638,6 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
 clean:
 	sfw_unload_test(tsi);
 	LIBCFS_FREE(tsi, sizeof(*tsi));
-	return;
 }
 
 static void
@@ -657,7 +656,6 @@ sfw_destroy_batch(sfw_batch_t *tsb)
 	}
 
 	LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
-	return;
 }
 
 void
@@ -677,7 +675,6 @@ sfw_destroy_session(sfw_session_t *sn)
 
 	LIBCFS_FREE(sn, sizeof(*sn));
 	atomic_dec(&sfw_data.fw_nzombies);
-	return;
 }
 
 static void
@@ -722,7 +719,6 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
 	}
 
 	LBUG();
-	return;
 }
 
 static int
@@ -785,7 +781,8 @@ sfw_add_test_instance(sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
 		lnet_process_id_packed_t id;
 		int j;
 
-		dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
+		dests = page_address(bk->bk_iovs[i /
+						 SFW_ID_PER_PAGE].kiov_page);
 		LASSERT(dests != NULL);  /* my pages are within KVM always */
 		id = dests[i % SFW_ID_PER_PAGE];
 		if (msg->msg_magic != SRPC_MSG_MAGIC)
@@ -860,7 +857,6 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
 	spin_unlock(&sfw_data.fw_lock);
 
 	sfw_destroy_session(sn);
-	return;
 }
 
 static void
@@ -896,7 +892,6 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
 	}
 
 	sfw_test_unit_done(tsu);
-	return;
 }
 
 int
@@ -1019,7 +1014,7 @@ sfw_run_batch(sfw_batch_t *tsb)
 			tsu->tsu_loop = tsi->tsi_loop;
 			wi = &tsu->tsu_worker;
 			swi_init_workitem(wi, tsu, sfw_run_test,
-					  lst_sched_test[\
+					  lst_sched_test[
 					  lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
 			swi_schedule_workitem(wi);
 		}
@@ -1268,7 +1263,8 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
 
 		if (sn != NULL &&
 		    sn->sn_features != request->msg_ses_feats) {
-			CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
+			CNETERR(
+				"Features of framework RPC don't match features of current session: %x/%x\n",
 				request->msg_ses_feats, sn->sn_features);
 			reply->msg_body.reply.status = EPROTO;
 			reply->msg_body.reply.sid    = sn->sn_id;
@@ -1345,7 +1341,8 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
 	spin_lock(&sfw_data.fw_lock);
 
 	if (status != 0) {
-		CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
+		CERROR(
+		       "Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
 		       sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
 		spin_unlock(&sfw_data.fw_lock);
 		return -EIO;
@@ -1556,7 +1553,6 @@ sfw_unpack_message(srpc_msg_t *msg)
 	}
 
 	LBUG();
-	return;
 }
 
 void
@@ -1568,7 +1564,6 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
 	spin_lock(&rpc->crpc_lock);
 	srpc_abort_rpc(rpc, -EINTR);
 	spin_unlock(&rpc->crpc_lock);
-	return;
 }
 
 void
@@ -1585,7 +1580,6 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
 	srpc_post_rpc(rpc);
 
 	spin_unlock(&rpc->crpc_lock);
-	return;
 }
 
 static srpc_service_t sfw_services[] = {
@@ -1660,7 +1654,8 @@ sfw_startup(void)
 	}
 
 	if (session_timeout == 0)
-		CWARN("Zero session_timeout specified - test sessions never expire.\n");
+		CWARN(
+		      "Zero session_timeout specified - test sessions never expire.\n");
 
 	if (rpc_timeout == 0)
 		CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
@@ -1700,7 +1695,8 @@ sfw_startup(void)
 
 	for (i = 0; ; i++) {
 		sv = &sfw_services[i];
-		if (sv->sv_name == NULL) break;
+		if (sv->sv_name == NULL)
+			break;
 
 		sv->sv_bulk_ready = NULL;
 		sv->sv_handler    = sfw_handle_server_rpc;
@@ -1717,11 +1713,13 @@ sfw_startup(void)
 		}
 
 		/* about to sfw_shutdown, no need to add buffer */
-		if (error) continue;
+		if (error)
+			continue;
 
 		rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
 		if (rc != 0) {
-			CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
+			CWARN(
+			      "Failed to reserve enough buffers: service %s, %d needed: %d\n",
 			      sv->sv_name, sv->sv_wi_total, rc);
 			error = -ENOMEM;
 		}
@@ -1799,6 +1797,4 @@ sfw_shutdown(void)
 		list_del(&tsc->tsc_list);
 		LIBCFS_FREE(tsc, sizeof(*tsc));
 	}
-
-	return;
 }
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 09b8f46..1afc581 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -118,7 +118,8 @@ lnet_selftest_init(void)
 		rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
 					 nthrs, &lst_sched_test[i]);
 		if (rc != 0) {
-			CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
+			CERROR(
+			       "Failed to create CPT affinity WI scheduler %d for LST\n",
 			       i);
 			goto error;
 		}
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 1dab998..6d48e06 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -165,7 +165,6 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
 	CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
 		(unsigned)((tv.tv_sec - (unsigned)reqst->pnr_time_sec) * 1000000
 			   + (tv.tv_usec - reqst->pnr_time_usec)));
-	return;
 }
 
 static int
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 59cf01f..41a09fa 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -118,7 +118,6 @@ srpc_free_bulk(srpc_bulk_t *bk)
 	}
 
 	LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
-	return;
 }
 
 srpc_bulk_t *
@@ -145,7 +144,8 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 		struct page *pg;
 		int nob;
 
-		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
+		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(),
+							  cpt),
 				      GFP_IOFS, 0);
 		if (pg == NULL) {
 			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
@@ -680,7 +680,8 @@ srpc_finish_service(struct srpc_service *sv)
 
 		rpc = list_entry(scd->scd_rpc_active.next,
 				     struct srpc_server_rpc, srpc_list);
-		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
+		CNETERR(
+			"Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
 			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
 			swi_state2str(rpc->srpc_wi.swi_state),
 			rpc->srpc_wi.swi_workitem.wi_scheduled,
@@ -952,7 +953,6 @@ srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
 	}
 
 	spin_unlock(&scd->scd_lock);
-	return;
 }
 
 /* handles an incoming RPC */
@@ -1103,7 +1103,6 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
 	timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
 					  get_seconds());
 	stt_add_timer(timer);
-	return;
 }
 
 /*
@@ -1166,7 +1165,6 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
 	spin_unlock(&rpc->crpc_lock);
 
 	(*rpc->crpc_done)(rpc);
-	return;
 }
 
 /* sends an outgoing RPC */
@@ -1244,7 +1242,8 @@ srpc_send_rpc(swi_workitem_t *wi)
 		if (reply->msg_type != type ||
 		    (reply->msg_magic != SRPC_MSG_MAGIC &&
 		     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
-			CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
+			CWARN(
+			      "Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
 			      libcfs_id2str(rpc->crpc_dest),
 			      reply->msg_type, type,
 			      reply->msg_magic, SRPC_MSG_MAGIC);
@@ -1253,7 +1252,8 @@ srpc_send_rpc(swi_workitem_t *wi)
 		}
 
 		if (do_bulk && reply->msg_body.reply.status != 0) {
-			CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
+			CWARN(
+			      "Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
 			      reply->msg_body.reply.status,
 			      libcfs_id2str(rpc->crpc_dest));
 			LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
@@ -1336,7 +1336,6 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
 	rpc->crpc_aborted = 1;
 	rpc->crpc_status  = why;
 	swi_schedule_workitem(&rpc->crpc_wi);
-	return;
 }
 
 /* called with rpc->crpc_lock held */
@@ -1352,7 +1351,6 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
 
 	srpc_add_client_rpc_timer(rpc);
 	swi_schedule_workitem(&rpc->crpc_wi);
-	return;
 }
 
 
@@ -1440,11 +1438,13 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
 		if (rpcev != &crpc->crpc_reqstev &&
 		    rpcev != &crpc->crpc_replyev &&
 		    rpcev != &crpc->crpc_bulkev) {
-			CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
+			CERROR(
+			       "rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
 			       rpcev, crpc, &crpc->crpc_reqstev,
 			       &crpc->crpc_replyev, &crpc->crpc_bulkev);
 			CERROR("Bad event: status %d, type %d, lnet %d\n",
-			       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
+			       rpcev->ev_status, rpcev->ev_type,
+			       rpcev->ev_lnet);
 			LBUG();
 		}
 
@@ -1511,7 +1511,8 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
 		     msg->msg_type != __swab32(type)) ||
 		    (msg->msg_magic != SRPC_MSG_MAGIC &&
 		     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
-			CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
+			CERROR(
+			       "Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
 			       sv->sv_name, libcfs_id2str(ev->initiator),
 			       ev->status, ev->mlength,
 			       msg->msg_type, msg->msg_magic);
@@ -1668,6 +1669,4 @@ srpc_shutdown(void)
 	case SRPC_STATE_NI_INIT:
 		LNetNIFini();
 	}
-
-	return;
 }
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 7939e4e..7a2b3d1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -95,11 +95,11 @@ struct sfw_test_instance;
 #define SRPC_RDMA_PORTAL              52
 
 static inline srpc_msg_type_t
-srpc_service2request (int service)
+srpc_service2request(int service)
 {
 	switch (service) {
 	default:
-		LBUG ();
+		LBUG();
 	case SRPC_SERVICE_DEBUG:
 		return SRPC_MSG_DEBUG_REQST;
 
@@ -130,7 +130,7 @@ srpc_service2request (int service)
 }
 
 static inline srpc_msg_type_t
-srpc_service2reply (int service)
+srpc_service2reply(int service)
 {
 	return srpc_service2request(service) + 1;
 }
@@ -319,8 +319,8 @@ typedef struct srpc_service {
 	 * - sv_handler: process incoming RPC request
 	 * - sv_bulk_ready: notify bulk data
 	 */
-	int                     (*sv_handler) (srpc_server_rpc_t *);
-	int                     (*sv_bulk_ready) (srpc_server_rpc_t *, int);
+	int                     (*sv_handler)(srpc_server_rpc_t *);
+	int                     (*sv_bulk_ready)(srpc_server_rpc_t *, int);
 } srpc_service_t;
 
 typedef struct {
@@ -416,7 +416,7 @@ typedef struct sfw_test_case {
 srpc_client_rpc_t *
 sfw_create_rpc(lnet_process_id_t peer, int service,
 	       unsigned features, int nbulkiov, int bulklen,
-	       void (*done) (srpc_client_rpc_t *), void *priv);
+	       void (*done)(srpc_client_rpc_t *), void *priv);
 int sfw_create_test_rpc(sfw_test_unit_t *tsu,
 			lnet_process_id_t peer, unsigned features,
 			int nblk, int blklen, srpc_client_rpc_t **rpc);
@@ -428,7 +428,7 @@ void sfw_free_pages(srpc_server_rpc_t *rpc);
 void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
 int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
 		    int sink);
-int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
 
 srpc_client_rpc_t *
 srpc_create_client_rpc(lnet_process_id_t peer, int service,
@@ -504,26 +504,23 @@ void sfw_shutdown(void);
 void srpc_shutdown(void);
 
 static inline void
-srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
 {
 	LASSERT(rpc != NULL);
 	LASSERT(!srpc_event_pending(rpc));
 	LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
 
-	if (rpc->crpc_fini == NULL) {
+	if (rpc->crpc_fini == NULL)
 		LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
-	} else {
+	else
 		(*rpc->crpc_fini) (rpc);
-	}
-
-	return;
 }
 
 static inline void
-srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
-		      int service, int nbulkiov, int bulklen,
-		      void (*rpc_done)(srpc_client_rpc_t *),
-		      void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+		     int service, int nbulkiov, int bulklen,
+		     void (*rpc_done)(srpc_client_rpc_t *),
+		     void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
 {
 	LASSERT(nbulkiov <= LNET_MAX_IOV);
 
@@ -555,24 +552,23 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
 	rpc->crpc_reqstmsg.msg_magic   = SRPC_MSG_MAGIC;
 	rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
 	rpc->crpc_reqstmsg.msg_type    = srpc_service2request(service);
-	return;
 }
 
 static inline const char *
-swi_state2str (int state)
+swi_state2str(int state)
 {
 #define STATE2STR(x) case x: return #x
-	switch(state) {
-		default:
-			LBUG();
-		STATE2STR(SWI_STATE_NEWBORN);
-		STATE2STR(SWI_STATE_REPLY_SUBMITTED);
-		STATE2STR(SWI_STATE_REPLY_SENT);
-		STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
-		STATE2STR(SWI_STATE_REQUEST_SENT);
-		STATE2STR(SWI_STATE_REPLY_RECEIVED);
-		STATE2STR(SWI_STATE_BULK_STARTED);
-		STATE2STR(SWI_STATE_DONE);
+	switch (state) {
+	default:
+		LBUG();
+	STATE2STR(SWI_STATE_NEWBORN);
+	STATE2STR(SWI_STATE_REPLY_SUBMITTED);
+	STATE2STR(SWI_STATE_REPLY_SENT);
+	STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
+	STATE2STR(SWI_STATE_REQUEST_SENT);
+	STATE2STR(SWI_STATE_REPLY_RECEIVED);
+	STATE2STR(SWI_STATE_BULK_STARTED);
+	STATE2STR(SWI_STATE_DONE);
 	}
 #undef STATE2STR
 }
@@ -607,7 +603,7 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
 
 	while (srpc_finish_service(sv) == 0) {
 		i++;
-		CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
+		CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
 			"Waiting for %s service to shutdown...\n",
 			sv->sv_name);
 		selftest_wait_events();
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index 2a8803d..e2b1a57 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -41,7 +41,7 @@
 typedef struct {
 	struct list_head stt_list;
 	unsigned long    stt_expires;
-	void             (*stt_func) (void *);
+	void             (*stt_func)(void *);
 	void             *stt_data;
 } stt_timer_t;
 
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-21 19:50 ` [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes Mike Shuey
@ 2015-05-21 21:00   ` Joe Perches
  2015-05-21 21:29     ` Julia Lawall
                       ` (2 more replies)
  0 siblings, 3 replies; 54+ messages in thread
From: Joe Perches @ 2015-05-21 21:00 UTC (permalink / raw)
  To: Mike Shuey
  Cc: oleg.drokin, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-deve

On Thu, 2015-05-21 at 15:50 -0400, Mike Shuey wrote:
> Fix many checkpatch.pl warnings.
[]
> diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
[]
> @@ -99,38 +99,42 @@ lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
>  	switch (rc) {
>  	/* "normal" errors */
>  	case -ECONNREFUSED:
> -		CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
> -			libcfs_nid2str(peer_nid),
> -			&peer_ip, peer_port);
> +		CNETERR(
> +			"Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
> +			libcfs_nid2str(peer_nid), &peer_ip, peer_port);

These are not improvements and checkpatch messages aren't dicta.

Please don't convert code unless the conversion makes it better
for a human reader.

These don't.



^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-21 21:00   ` Joe Perches
@ 2015-05-21 21:29     ` Julia Lawall
       [not found]       ` <CABqvwjuKypiacf3336j-q450FGKaO4wh0Ld7PF8MGz0VPsYBbw@mail.gmail.com>
       [not found]     ` <CABqvwjs=S1VrCHe7M4JnDikGBVN3yqjNMrtSA=UvftGmxSasRQ@mail.gmail.com>
  2015-05-23  6:28     ` [PATCH] checkpatch: Categorize some long line length checks Joe Perches
  2 siblings, 1 reply; 54+ messages in thread
From: Julia Lawall @ 2015-05-21 21:29 UTC (permalink / raw)
  To: Joe Perches
  Cc: Mike Shuey, oleg.drokin, devel, gregkh, kernel-janitors,
	linux-kernel, HPDD-discuss, lustre-deve



On Thu, 21 May 2015, Joe Perches wrote:

> On Thu, 2015-05-21 at 15:50 -0400, Mike Shuey wrote:
> > Fix many checkpatch.pl warnings.
> []
> > diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
> []
> > @@ -99,38 +99,42 @@ lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
> >  	switch (rc) {
> >  	/* "normal" errors */
> >  	case -ECONNREFUSED:
> > -		CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
> > -			libcfs_nid2str(peer_nid),
> > -			&peer_ip, peer_port);
> > +		CNETERR(
> > +			"Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
> > +			libcfs_nid2str(peer_nid), &peer_ip, peer_port);
>
> These are not improvements and checkpatch messages aren't dicta.
>
> Please don't convert code unless the conversion makes it better
> for a human reader.
>
> These don't.

I haven't looked into it, but perhaps there is a standard kernel printing
function that these could be converted to directly?

julia

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
       [not found]       ` <CABqvwjuKypiacf3336j-q450FGKaO4wh0Ld7PF8MGz0VPsYBbw@mail.gmail.com>
@ 2015-05-22  2:46         ` Joe Perches
  2015-05-22  5:06         ` Julia Lawall
  1 sibling, 0 replies; 54+ messages in thread
From: Joe Perches @ 2015-05-22  2:46 UTC (permalink / raw)
  To: Michael Shuey
  Cc: Julia Lawall, Drokin, Oleg, devel, gregkh, kernel-janitors,
	linux-kernel, HPDD-discuss, lustre-devel

On Thu, 2015-05-21 at 18:04 -0400, Michael Shuey wrote:
> That's a task (of many) I've been putting on the back burner until the code
> is cleaner.  It's also a HUGE change, since there are debug macros
> everywhere, and they all check a #define'd mask to see if they should fire,
> and the behavior is likely governed by parts of the lustre user land tools
> as well.
> 
> Suggestions are welcome.  Do other parts of the linux kernel define complex
> debugging macros like these, or is this a lustre-ism?  Any suggestions on
> how to handle this more in line with existing drivers?

Yes, many other bits of code use custom debugging macros.

A good general form is to add a either a generic level or bitmask
macro and use a single entry like:

	my_dbg([optional_ptr,] <level|bitmap>, fmt, ...)

so that <level|bitmap> can be tested against some
variable set by MODULE_PARM_DESC controls.

So, CNETERR(...) might be lustre_dbg(ptr, NETERR, fmt, ...)

though I don't know what use the ptr might have.



^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
       [not found]     ` <CABqvwjs=S1VrCHe7M4JnDikGBVN3yqjNMrtSA=UvftGmxSasRQ@mail.gmail.com>
@ 2015-05-22  3:06       ` Joe Perches
  2015-05-22  8:04       ` Drokin, Oleg
  1 sibling, 0 replies; 54+ messages in thread
From: Joe Perches @ 2015-05-22  3:06 UTC (permalink / raw)
  To: Michael Shuey
  Cc: Drokin, Oleg, devel, gregkh, kernel-janitors, linux-kernel,
	hpdd-discuss@lists.01.org, lustre-deve

On Thu, 2015-05-21 at 17:47 -0400, Michael Shuey wrote:
> Any suggestions on other checkpatch warnings?  Most of what remains are
> "don't introduce new typedefs" warnings - should these be removed as well,
> or am I safe to leave these?

I'm personally not a big fan of non-enum typedefs unless
the typedef hides some arch or size specific information
that's otherwise hard to handle.

I think struct/function/native type equivalent typedefs
are better removed.

coccinelle is a good tool for this.

I rather like enum typedefs, but that's not a common view
in lk land.

> I ask because these changes will be huge, and
> are unlikely to improve readability (but I don't know where the kernel
> community stands on having billions of typedefs everywhere).

I counted slightly less than billions.  I got 281.



^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
       [not found]       ` <CABqvwjuKypiacf3336j-q450FGKaO4wh0Ld7PF8MGz0VPsYBbw@mail.gmail.com>
  2015-05-22  2:46         ` Joe Perches
@ 2015-05-22  5:06         ` Julia Lawall
  2015-05-22  8:08           ` Drokin, Oleg
  1 sibling, 1 reply; 54+ messages in thread
From: Julia Lawall @ 2015-05-22  5:06 UTC (permalink / raw)
  To: Michael Shuey
  Cc: Julia Lawall, Joe Perches, Drokin, Oleg, devel, gregkh,
	kernel-janitors, linux-kernel, HPDD-discuss, lustre-devel

[-- Attachment #1: Type: TEXT/PLAIN, Size: 2320 bytes --]

On Thu, 21 May 2015, Michael Shuey wrote:

> That's a task (of many) I've been putting on the back burner until the code
> is cleaner.  It's also a HUGE change, since there are debug macros
> everywhere, and they all check a #define'd mask to see if they should fire,
> and the behavior is likely governed by parts of the lustre user land tools
> as well.
>
> Suggestions are welcome.  Do other parts of the linux kernel define complex
> debugging macros like these, or is this a lustre-ism?  Any suggestions on
> how to handle this more in line with existing drivers?

Once you decide what to do, you can use Coccinelle to make the changes for
you.  So you shouldn't be put off by the number of code sites to change.

The normal functions are pr_err, pr_warn, etc.  Perhaps you can follow
Joe's suggestions if you really need something more complicated.

julia
>
> --
> Mike Shuey
>
> On Thu, May 21, 2015 at 5:29 PM, Julia Lawall <julia.lawall@lip6.fr> wrote:
> >
> >
> >
> > On Thu, 21 May 2015, Joe Perches wrote:
> >
> > > On Thu, 2015-05-21 at 15:50 -0400, Mike Shuey wrote:
> > > > Fix many checkpatch.pl warnings.
> > > []
> > > > diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c
> b/drivers/staging/lustre/lnet/lnet/acceptor.c
> > > []
> > > > @@ -99,38 +99,42 @@ lnet_connect_console_error(int rc, lnet_nid_t
> peer_nid,
> > > >     switch (rc) {
> > > >     /* "normal" errors */
> > > >     case -ECONNREFUSED:
> > > > -           CNETERR("Connection to %s at host %pI4h on port %d was
> refused: check that Lustre is running on that node.\n",
> > > > -                   libcfs_nid2str(peer_nid),
> > > > -                   &peer_ip, peer_port);
> > > > +           CNETERR(
> > > > +                   "Connection to %s at host %pI4h on port %d was
> refused: check that Lustre is running on that node.\n",
> > > > +                   libcfs_nid2str(peer_nid), &peer_ip, peer_port);
> > >
> > > These are not improvements and checkpatch messages aren't dicta.
> > >
> > > Please don't convert code unless the conversion makes it better
> > > for a human reader.
> > >
> > > These don't.
> >
> > I haven't looked into it, but perhaps there is a standard kernel printing
> > function that these could be converted to directly?
> >
> > julia
>
>

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
       [not found]     ` <CABqvwjs=S1VrCHe7M4JnDikGBVN3yqjNMrtSA=UvftGmxSasRQ@mail.gmail.com>
  2015-05-22  3:06       ` Joe Perches
@ 2015-05-22  8:04       ` Drokin, Oleg
  2015-05-22 10:00         ` Julia Lawall
  1 sibling, 1 reply; 54+ messages in thread
From: Drokin, Oleg @ 2015-05-22  8:04 UTC (permalink / raw)
  To: Michael Shuey
  Cc: Joe Perches, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>, hpdd-discuss@lists.01.org,
	<lustre-deve@lists.lustre.org>

Removal of lustre-added typedefs is worthwhile, actually.
I scraped the surface some time ago, but could not complete it back then.

On May 21, 2015, at 5:47 PM, Michael Shuey wrote:

> I've been killing off a *lot* of checkpatch warnings, and I'm probably getting a tad overzealous.  I'll drop these from the patch series next time I rebase, and avoid doing this in the future.  Thanks for the input.
> 
> Any suggestions on other checkpatch warnings?  Most of what remains are "don't introduce new typedefs" warnings - should these be removed as well, or am I safe to leave these?  I ask because these changes will be huge, and are unlikely to improve readability (but I don't know where the kernel community stands on having billions of typedefs everywhere.
> 
> --
> Mike Shuey
> 
> On Thu, May 21, 2015 at 5:00 PM, Joe Perches <joe@perches.com> wrote:
> On Thu, 2015-05-21 at 15:50 -0400, Mike Shuey wrote:
> > Fix many checkpatch.pl warnings.
> []
> > diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
> []
> > @@ -99,38 +99,42 @@ lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
> >       switch (rc) {
> >       /* "normal" errors */
> >       case -ECONNREFUSED:
> > -             CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
> > -                     libcfs_nid2str(peer_nid),
> > -                     &peer_ip, peer_port);
> > +             CNETERR(
> > +                     "Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
> > +                     libcfs_nid2str(peer_nid), &peer_ip, peer_port);
> 
> These are not improvements and checkpatch messages aren't dicta.
> 
> Please don't convert code unless the conversion makes it better
> for a human reader.
> 
> These don't.
> 
> 
> 


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-22  5:06         ` Julia Lawall
@ 2015-05-22  8:08           ` Drokin, Oleg
  2015-05-22 15:42             ` Joe Perches
  0 siblings, 1 reply; 54+ messages in thread
From: Drokin, Oleg @ 2015-05-22  8:08 UTC (permalink / raw)
  To: Julia Lawall
  Cc: Michael Shuey, Joe Perches, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>


On May 22, 2015, at 1:06 AM, Julia Lawall wrote:

> On Thu, 21 May 2015, Michael Shuey wrote:
> 
>> That's a task (of many) I've been putting on the back burner until the code
>> is cleaner.  It's also a HUGE change, since there are debug macros
>> everywhere, and they all check a #define'd mask to see if they should fire,
>> and the behavior is likely governed by parts of the lustre user land tools
>> as well.
>> 
>> Suggestions are welcome.  Do other parts of the linux kernel define complex
>> debugging macros like these, or is this a lustre-ism?  Any suggestions on
>> how to handle this more in line with existing drivers?
> 
> Once you decide what to do, you can use Coccinelle to make the changes for
> you.  So you shouldn't be put off by the number of code sites to change.
> 
> The normal functions are pr_err, pr_warn, etc.  Perhaps you can follow
> Joe's suggestions if you really need something more complicated.

Ideally leaving CERROR/CDEBUG in Lustre would be desirable from my perspective.
It allows you fine grained control about what to collect and what to output
into a (quite finite) kernel buffer (and over a quite slow serial console)
and at the same time if you need more info, there's a buffer you can fetch
separately that can grow much bigger and there's even a way to run a special
daemon to scrub the buffer eagerly so none of it is lost.

Bye,
    Oleg

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 00/13] staging: lustre: lnet: code cleanups
  2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
                   ` (21 preceding siblings ...)
  2015-05-21 19:50 ` [PATCH 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes Mike Shuey
@ 2015-05-22  9:21 ` Dan Carpenter
       [not found]   ` <CABqvwjsJXJYciMDHqy9vEdL6dGLhx3+Pi0_ro6192Z5SGR9Q_w@mail.gmail.com>
  22 siblings, 1 reply; 54+ messages in thread
From: Dan Carpenter @ 2015-05-22  9:21 UTC (permalink / raw)
  To: Mike Shuey
  Cc: oleg.drokin, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-deve

On Thu, May 21, 2015 at 03:50:23PM -0400, Mike Shuey wrote:
> This patch series cleans up code in staging/lustre/lnet - mostly spacing
> and dead function cleanup.  Should apply against linux-next 20150518.

Why are you sending a v4 of this?  We don't actually enjoy reviewing the
same patchset over and over...

Update:  I have investigated and it is because you add a few new patches
to this set which were not in the original.  Just assume that the
original patchset will be merged and send the additional patches as a new
set.

Also since no one replied to the v3 patchset and Greg has 5000 patches
in between v3 and v4 he will not see v4 until days after he has applied
v3 so it causes confusion.

Also you sent a v5 of this patchset but without a v5 label...

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-22  8:04       ` Drokin, Oleg
@ 2015-05-22 10:00         ` Julia Lawall
  0 siblings, 0 replies; 54+ messages in thread
From: Julia Lawall @ 2015-05-22 10:00 UTC (permalink / raw)
  To: Drokin, Oleg
  Cc: Michael Shuey, Joe Perches, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>, hpdd-discuss@lists.01.org,
	<lustre-deve@lists.lustre.org>



On Fri, 22 May 2015, Drokin, Oleg wrote:

> Removal of lustre-added typedefs is worthwhile, actually.
> I scraped the surface some time ago, but could not complete it back then.
>
> On May 21, 2015, at 5:47 PM, Michael Shuey wrote:
>
>> I've been killing off a *lot* of checkpatch warnings, and I'm probably getting a tad overzealous.  I'll drop these from the patch series next time I rebase, and avoid doing this in the future.  Thanks for the input.
>>
>> Any suggestions on other checkpatch warnings?  Most of what remains are "don't introduce new typedefs" warnings - should these be removed as well, or am I safe to leave these?  I ask because these changes will be huge, and are unlikely to improve readability (but I don't know where the kernel community stands on having billions of typedefs everywhere.

There is discussion in the kernel documentation about typedefs.  Typedefs 
on structure types are particularly discouraged, to my recollection.

julia


>>
>> --
>> Mike Shuey
>>
>> On Thu, May 21, 2015 at 5:00 PM, Joe Perches <joe@perches.com> wrote:
>> On Thu, 2015-05-21 at 15:50 -0400, Mike Shuey wrote:
>>> Fix many checkpatch.pl warnings.
>> []
>>> diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
>> []
>>> @@ -99,38 +99,42 @@ lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
>>>       switch (rc) {
>>>       /* "normal" errors */
>>>       case -ECONNREFUSED:
>>> -             CNETERR("Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
>>> -                     libcfs_nid2str(peer_nid),
>>> -                     &peer_ip, peer_port);
>>> +             CNETERR(
>>> +                     "Connection to %s at host %pI4h on port %d was refused: check that Lustre is running on that node.\n",
>>> +                     libcfs_nid2str(peer_nid), &peer_ip, peer_port);
>>
>> These are not improvements and checkpatch messages aren't dicta.
>>
>> Please don't convert code unless the conversion makes it better
>> for a human reader.
>>
>> These don't.
>>
>>
>>
>
> --
> To unsubscribe from this list: send the line "unsubscribe kernel-janitors" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 11/13] staging: lnet: o2iblnd: checkpatch.pl fixes
  2015-05-21 19:50 ` [PATCH v4 11/13] staging: lnet: o2iblnd: checkpatch.pl fixes Mike Shuey
@ 2015-05-22 10:49   ` Dan Carpenter
  0 siblings, 0 replies; 54+ messages in thread
From: Dan Carpenter @ 2015-05-22 10:49 UTC (permalink / raw)
  To: Mike Shuey
  Cc: oleg.drokin, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-deve

On Thu, May 21, 2015 at 03:50:34PM -0400, Mike Shuey wrote:
> @@ -2758,7 +2763,7 @@ void kiblnd_shutdown(lnet_ni_t *ni)
>  		i = 2;
>  		while (atomic_read(&net->ibn_npeers) != 0) {
>  			i++;
> -			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
> +			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,/* 2**n? */

The original was better.  I don't like this patch for the same reasons
that people complained about patch 10.  It does too many things as well.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes
  2015-05-21 19:50 ` [PATCH 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes Mike Shuey
@ 2015-05-22 10:55   ` Dan Carpenter
  0 siblings, 0 replies; 54+ messages in thread
From: Dan Carpenter @ 2015-05-22 10:55 UTC (permalink / raw)
  To: Mike Shuey
  Cc: oleg.drokin, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-deve

On Thu, May 21, 2015 at 03:50:49PM -0400, Mike Shuey wrote:
> Cleaning up more checkpatch.pl issues.
> 

One thing per patch.  Better changelogs.  Yada yada yada.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace
  2015-05-21 19:50 ` [PATCH v4 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace Mike Shuey
@ 2015-05-22 13:03   ` Dan Carpenter
  0 siblings, 0 replies; 54+ messages in thread
From: Dan Carpenter @ 2015-05-22 13:03 UTC (permalink / raw)
  To: Mike Shuey
  Cc: oleg.drokin, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-deve

On Thu, May 21, 2015 at 03:50:28PM -0400, Mike Shuey wrote:
> diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
> index cd664d0..7f52c69 100644
> --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
> +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
> @@ -80,42 +80,47 @@
>  #define IBLND_N_SCHED_HIGH		4
>  
>  typedef struct {
> -	int	      *kib_dev_failover;     /* HCA failover */
> -	unsigned int     *kib_service;	  /* IB service number */
> -	int	      *kib_min_reconnect_interval; /* first failed connection retry... */
> -	int	      *kib_max_reconnect_interval; /* ...exponentially increasing to this */
> -	int	      *kib_cksum;	    /* checksum kib_msg_t? */
> -	int	      *kib_timeout;	  /* comms timeout (seconds) */
> -	int	      *kib_keepalive;	/* keepalive timeout (seconds) */
> -	int	      *kib_ntx;	      /* # tx descs */
> -	int	      *kib_credits;	  /* # concurrent sends */
> -	int	      *kib_peertxcredits;    /* # concurrent sends to 1 peer */
> -	int	      *kib_peerrtrcredits;   /* # per-peer router buffer credits */
> -	int	      *kib_peercredits_hiw;  /* # when eagerly to return credits */
> -	int	      *kib_peertimeout;      /* seconds to consider peer dead */
> -	char	    **kib_default_ipif;     /* default IPoIB interface */
> -	int	      *kib_retry_count;
> -	int	      *kib_rnr_retry_count;
> -	int	      *kib_concurrent_sends; /* send work queue sizing */
> -	int		 *kib_ib_mtu;		/* IB MTU */
> -	int	      *kib_map_on_demand;    /* map-on-demand if RD has more fragments
> -						 * than this value, 0 disable map-on-demand */
> -	int	      *kib_pmr_pool_size;    /* # physical MR in pool */
> -	int	      *kib_fmr_pool_size;    /* # FMRs in pool */
> -	int	      *kib_fmr_flush_trigger; /* When to trigger FMR flush */
> -	int	      *kib_fmr_cache;	/* enable FMR pool cache? */
> -	int	      *kib_require_priv_port;/* accept only privileged ports */
> -	int	      *kib_use_priv_port;    /* use privileged port for active connect */
> -	/* # threads on each CPT */
> -	int		 *kib_nscheds;
> +	int          *kib_dev_failover;      /* HCA failover */
> +	unsigned int *kib_service;           /* IB service number */
> +	int          *kib_min_reconnect_interval; /* first failed connection
> +						   * retry... */
> +	int          *kib_max_reconnect_interval; /* ...exponentially increasing
> +						   * to this */
> +	int          *kib_cksum;             /* checksum kib_msg_t? */
> +	int          *kib_timeout;           /* comms timeout (seconds) */
> +	int          *kib_keepalive;         /* keepalive timeout (seconds) */
> +	int          *kib_ntx;               /* # tx descs */
> +	int          *kib_credits;           /* # concurrent sends */
> +	int          *kib_peertxcredits;     /* # concurrent sends to 1 peer */
> +	int          *kib_peerrtrcredits;    /* # per-peer router buffer
> +					      * credits */
> +	int          *kib_peercredits_hiw;   /* # when eagerly to return
> +					      * credits */
> +	int          *kib_peertimeout;       /* seconds to consider peer dead */
> +	char         **kib_default_ipif;     /* default IPoIB interface */
> +	int          *kib_retry_count;
> +	int          *kib_rnr_retry_count;
> +	int          *kib_concurrent_sends;  /* send work queue sizing */
> +	int          *kib_ib_mtu;            /* IB MTU */
> +	int          *kib_map_on_demand;     /* map-on-demand if RD has more
> +					      * fragments than this value, 0
> +					      * disable map-on-demand */
> +	int          *kib_pmr_pool_size;     /* # physical MR in pool */
> +	int          *kib_fmr_pool_size;     /* # FMRs in pool */
> +	int          *kib_fmr_flush_trigger; /* When to trigger FMR flush */
> +	int          *kib_fmr_cache;         /* enable FMR pool cache? */
> +	int          *kib_require_priv_port; /* accept only privileged ports */
> +	int          *kib_use_priv_port;     /* use privileged port for active
> +					      * connect */
> +	int          *kib_nscheds;           /* # threads on each CPT */
>  } kib_tunables_t;

The lustre people have their own style guidelines for these things in
the .h file.  We should probably match theirs.

I don't care one way or the other, what style people use for this.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-22  8:08           ` Drokin, Oleg
@ 2015-05-22 15:42             ` Joe Perches
  2015-05-22 21:16               ` Drokin, Oleg
  0 siblings, 1 reply; 54+ messages in thread
From: Joe Perches @ 2015-05-22 15:42 UTC (permalink / raw)
  To: Drokin, Oleg
  Cc: Julia Lawall, Michael Shuey, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>

On Fri, 2015-05-22 at 08:08 +0000, Drokin, Oleg wrote:
> On May 22, 2015, at 1:06 AM, Julia Lawall wrote:
> 
> > On Thu, 21 May 2015, Michael Shuey wrote:
> > 
> >> That's a task (of many) I've been putting on the back burner until the code
> >> is cleaner.  It's also a HUGE change, since there are debug macros
> >> everywhere, and they all check a #define'd mask to see if they should fire,
> >> and the behavior is likely governed by parts of the lustre user land tools
> >> as well.
> >> 
> >> Suggestions are welcome.  Do other parts of the linux kernel define complex
> >> debugging macros like these, or is this a lustre-ism?  Any suggestions on
> >> how to handle this more in line with existing drivers?
> > 
> > Once you decide what to do, you can use Coccinelle to make the changes for
> > you.  So you shouldn't be put off by the number of code sites to change.
> > 
> > The normal functions are pr_err, pr_warn, etc.  Perhaps you can follow
> > Joe's suggestions if you really need something more complicated.
> 
> Ideally leaving CERROR/CDEBUG in Lustre would be desirable from my perspective.

My issue with CERROR is the name is a little misleading.
It's actually a debugging message.
#define CERROR(format, ...)  CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)

I think it'd be clearer as
	lustre_debug(ERROR, ...
even if the name and use style is a little longer.

> It allows you fine grained control about what to collect and what to output
> into a (quite finite) kernel buffer (and over a quite slow serial console)
> and at the same time if you need more info, there's a buffer you can fetch
> separately that can grow much bigger and there's even a way to run a special
> daemon to scrub the buffer eagerly so none of it is lost.



^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-22 15:42             ` Joe Perches
@ 2015-05-22 21:16               ` Drokin, Oleg
  2015-05-22 23:57                 ` Joe Perches
  0 siblings, 1 reply; 54+ messages in thread
From: Drokin, Oleg @ 2015-05-22 21:16 UTC (permalink / raw)
  To: Joe Perches
  Cc: Julia Lawall, Michael Shuey, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>


On May 22, 2015, at 11:42 AM, Joe Perches wrote:

> On Fri, 2015-05-22 at 08:08 +0000, Drokin, Oleg wrote:
>> On May 22, 2015, at 1:06 AM, Julia Lawall wrote:
>> 
>>> On Thu, 21 May 2015, Michael Shuey wrote:
>>> 
>>>> That's a task (of many) I've been putting on the back burner until the code
>>>> is cleaner.  It's also a HUGE change, since there are debug macros
>>>> everywhere, and they all check a #define'd mask to see if they should fire,
>>>> and the behavior is likely governed by parts of the lustre user land tools
>>>> as well.
>>>> 
>>>> Suggestions are welcome.  Do other parts of the linux kernel define complex
>>>> debugging macros like these, or is this a lustre-ism?  Any suggestions on
>>>> how to handle this more in line with existing drivers?
>>> 
>>> Once you decide what to do, you can use Coccinelle to make the changes for
>>> you.  So you shouldn't be put off by the number of code sites to change.
>>> 
>>> The normal functions are pr_err, pr_warn, etc.  Perhaps you can follow
>>> Joe's suggestions if you really need something more complicated.
>> 
>> Ideally leaving CERROR/CDEBUG in Lustre would be desirable from my perspective.
> 
> My issue with CERROR is the name is little misleading.
> It's actually a debugging message.
> #define CERROR(format, ...)  CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)

Except it's not a debugging message.
There is a clear distinction.

CERROR is something that gets printed on the console, because it's believed
to be a serious error (At least that's how the theory for its usage goes).
It also gets rate-limited so that the console does not get overrun.
(but the debug buffer gets the full version).
(there's also LCONSOLE that always gets printed, but it does not get the
prefixes like line numbers and stuff).

CDEBUG on the other hand is a debugging message (of which ERROR messages are
sort of a subset (D_ERROR mask)). You can fine-tune those to be noops or
to go into console or to debug buffer only. Most of those are doing nothing
because they are off in the default debug mask, until actually enabled.

That CERROR uses CDEBUG underneath is just to share some common infrastructure.

> I think it'd be clearer as
> 	lustre_debug(ERROR, ...
> even if the name and use style is a little longer.

I wonder what is more clear about that in your opinion vs
lustre_error/lustre_debug?

Bye,
    Oleg


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-22 21:16               ` Drokin, Oleg
@ 2015-05-22 23:57                 ` Joe Perches
  2015-05-23  0:07                   ` Drokin, Oleg
  0 siblings, 1 reply; 54+ messages in thread
From: Joe Perches @ 2015-05-22 23:57 UTC (permalink / raw)
  To: Drokin, Oleg
  Cc: Julia Lawall, Michael Shuey, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>

On Fri, 2015-05-22 at 21:16 +0000, Drokin, Oleg wrote:
> On May 22, 2015, at 11:42 AM, Joe Perches wrote:
> 
> > On Fri, 2015-05-22 at 08:08 +0000, Drokin, Oleg wrote:
> >> On May 22, 2015, at 1:06 AM, Julia Lawall wrote:
> >> 
> >>> On Thu, 21 May 2015, Michael Shuey wrote:
> >>> 
> >>>> That's a task (of many) I've been putting on the back burner until the code
> >>>> is cleaner.  It's also a HUGE change, since there are debug macros
> >>>> everywhere, and they all check a #define'd mask to see if they should fire,
> >>>> and the behavior is likely governed by parts of the lustre user land tools
> >>>> as well.
> >>>> 
> >>>> Suggestions are welcome.  Do other parts of the linux kernel define complex
> >>>> debugging macros like these, or is this a lustre-ism?  Any suggestions on
> >>>> how to handle this more in line with existing drivers?
> >>> 
> >>> Once you decide what to do, you can use Coccinelle to make the changes for
> >>> you.  So you shouldn't be put off by the number of code sites to change.
> >>> 
> >>> The normal functions are pr_err, pr_warn, etc.  Perhaps you can follow
> >>> Joe's suggestions if you really need something more complicated.
> >> 
> >> Ideally leaving CERROR/CDEBUG in Lustre would be desirable from my perspective.
> > 
> > My issue with CERROR is the name is little misleading.
> > It's actually a debugging message.
> > #define CERROR(format, ...)  CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
> 
> Except it's not a debugging message.
> There is a clear distinction.

Not really.  If the first reading shows that the mechanism it
goes through is called CDEBUG, a reasonable expectation should
be that it's a debugging message.

> CERROR is something that get's printed on the console, because it's believed
> to be serious error (At least that's how the theory for it's usage goes).
> It also gets rate-limited so that the console does not get overflown.
> (but the debug buffer gets the full version).
> (there's also LCONSOLE that always get's printed, but it does not get the
> prefixes like line numbers and stuff).
> 
> CDEBUG on the other hand is a debugging message (of which ERROR messages are
> sort of a subset (D_ERROR mask)). You can fine-tune those to be noops or
> to go into console or to debug buffer only. Most of those are doing nothing
> because they are off in the default debug mask, until actually enabled.
> 
> That CERROR usees CDEBUG underneath is just to share some common infrastructure.
> 
> > I think it'd be clearer as
> > 	lustre_debug(ERROR, ...
> > even if the name and use style is a little longer.
> 
> I wonder what is more clear about that in your opinion ve
> lustre_error/lustre_debug?

The fact that you have to explain this shows that it's
at least misleading unless you completely understand the
code.

It'd be more intelligible if this CERROR became lustre_err
and the actual debugging uses were lustre_dbg

Perhaps it needs a better explanation somewhere not in the
code but in some external documentation.  I haven't looked.


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-22 23:57                 ` Joe Perches
@ 2015-05-23  0:07                   ` Drokin, Oleg
  2015-05-23  0:18                     ` Joe Perches
  0 siblings, 1 reply; 54+ messages in thread
From: Drokin, Oleg @ 2015-05-23  0:07 UTC (permalink / raw)
  To: Joe Perches
  Cc: Julia Lawall, Michael Shuey, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>


On May 22, 2015, at 7:57 PM, Joe Perches wrote:

> On Fri, 2015-05-22 at 21:16 +0000, Drokin, Oleg wrote:
>> On May 22, 2015, at 11:42 AM, Joe Perches wrote:
>> 
>>> On Fri, 2015-05-22 at 08:08 +0000, Drokin, Oleg wrote:
>>>> On May 22, 2015, at 1:06 AM, Julia Lawall wrote:
>>>> 
>>>>> On Thu, 21 May 2015, Michael Shuey wrote:
>>>>> 
>>>>>> That's a task (of many) I've been putting on the back burner until the code
>>>>>> is cleaner.  It's also a HUGE change, since there are debug macros
>>>>>> everywhere, and they all check a #define'd mask to see if they should fire,
>>>>>> and the behavior is likely governed by parts of the lustre user land tools
>>>>>> as well.
>>>>>> 
>>>>>> Suggestions are welcome.  Do other parts of the linux kernel define complex
>>>>>> debugging macros like these, or is this a lustre-ism?  Any suggestions on
>>>>>> how to handle this more in line with existing drivers?
>>>>> 
>>>>> Once you decide what to do, you can use Coccinelle to make the changes for
>>>>> you.  So you shouldn't be put off by the number of code sites to change.
>>>>> 
>>>>> The normal functions are pr_err, pr_warn, etc.  Perhaps you can follow
>>>>> Joe's suggestions if you really need something more complicated.
>>>> 
>>>> Ideally leaving CERROR/CDEBUG in Lustre would be desirable from my perspective.
>>> 
>>> My issue with CERROR is the name is little misleading.
>>> It's actually a debugging message.
>>> #define CERROR(format, ...)  CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
>> 
>> Except it's not a debugging message.
>> There is a clear distinction.
> 
> Not really.  If the first reading sjows that the mechanism it
> goes through is called CDEBUG, a reasonable expectation should
> be that it's a debugging message.

Well, various pr_err/pr_dbg for example, go through printk in the end too.
Does that make them the same?

> 
>> CERROR is something that get's printed on the console, because it's believed
>> to be serious error (At least that's how the theory for it's usage goes).
>> It also gets rate-limited so that the console does not get overflown.
>> (but the debug buffer gets the full version).
>> (there's also LCONSOLE that always get's printed, but it does not get the
>> prefixes like line numbers and stuff).
>> 
>> CDEBUG on the other hand is a debugging message (of which ERROR messages are
>> sort of a subset (D_ERROR mask)). You can fine-tune those to be noops or
>> to go into console or to debug buffer only. Most of those are doing nothing
>> because they are off in the default debug mask, until actually enabled.
>> 
>> That CERROR usees CDEBUG underneath is just to share some common infrastructure.
>> 
>>> I think it'd be clearer as
>>> 	lustre_debug(ERROR, ...
>>> even if the name and use style is a little longer.
>> 
>> I wonder what is more clear about that in your opinion ve
>> lustre_error/lustre_debug?
> 
> The fact that you have to explain this shows that it's
> at least misleading unless you completely understand the
> code.

Or you know, you might take the function name at face value
and assume that CERROR means it's an error and CDEBUG means it's a debug message?

> It'd be more intelligible if this CERROR became lustre_err
> and the actual debugging uses were lustre_dbg

But the actual underlying call is hidden by the macro anyway and you never
get to see it. You see CDEBUG/CERROR and how is that different
from lustre_debug/lustre_err?


> Perhaps it needs a better explanation somewhere not in the
> code but in some external documentation.  I haven't looked.
> 


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-23  0:07                   ` Drokin, Oleg
@ 2015-05-23  0:18                     ` Joe Perches
  2015-05-23  0:25                       ` Drokin, Oleg
  0 siblings, 1 reply; 54+ messages in thread
From: Joe Perches @ 2015-05-23  0:18 UTC (permalink / raw)
  To: Drokin, Oleg
  Cc: Julia Lawall, Michael Shuey, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>

On Sat, 2015-05-23 at 00:07 +0000, Drokin, Oleg wrote:
> On May 22, 2015, at 7:57 PM, Joe Perches wrote:
> > On Fri, 2015-05-22 at 21:16 +0000, Drokin, Oleg wrote:
> >> On May 22, 2015, at 11:42 AM, Joe Perches wrote:
> >>> On Fri, 2015-05-22 at 08:08 +0000, Drokin, Oleg wrote:
> >>>> On May 22, 2015, at 1:06 AM, Julia Lawall wrote:
> >>>>> On Thu, 21 May 2015, Michael Shuey wrote:
> >>>>> 
> >>>>>> That's a task (of many) I've been putting on the back burner until the code
> >>>>>> is cleaner.  It's also a HUGE change, since there are debug macros
> >>>>>> everywhere, and they all check a #define'd mask to see if they should fire,
> >>>>>> and the behavior is likely governed by parts of the lustre user land tools
> >>>>>> as well.
> >>>>>> 
> >>>>>> Suggestions are welcome.  Do other parts of the linux kernel define complex
> >>>>>> debugging macros like these, or is this a lustre-ism?  Any suggestions on
> >>>>>> how to handle this more in line with existing drivers?
> >>>>> 
> >>>>> Once you decide what to do, you can use Coccinelle to make the changes for
> >>>>> you.  So you shouldn't be put off by the number of code sites to change.
> >>>>> 
> >>>>> The normal functions are pr_err, pr_warn, etc.  Perhaps you can follow
> >>>>> Joe's suggestions if you really need something more complicated.
> >>>> 
> >>>> Ideally leaving CERROR/CDEBUG in Lustre would be desirable from my perspective.
> >>> 
> >>> My issue with CERROR is the name is little misleading.
> >>> It's actually a debugging message.
> >>> #define CERROR(format, ...)  CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
> >> 
> >> Except it's not a debugging message.
> >> There is a clear distinction.
> > 
> > Not really.  If the first reading shows that the mechanism it
> > goes through is called CDEBUG, a reasonable expectation should
> > be that it's a debugging message.
> 
> Well, various pr_err/pr_dbg for example, go through printk in the end too.
> Do that make them the same?

No, because each is labeled with the KERN_<level> that it uses.

[]

> >> I wonder what is more clear about that in your opinion ve
> >> lustre_error/lustre_debug?
> > 
> > The fact that you have to explain this shows that it's
> > at least misleading unless you completely understand the
> > code.
> 
> Or you know, you might take the function name at the face value
> and assume that CERROR means it's an error and CDEBUG means it's a debug message?

Maybe, but I think that it'd be better if the mechanism
it uses was more plainly named something like lustre_log.




^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-23  0:18                     ` Joe Perches
@ 2015-05-23  0:25                       ` Drokin, Oleg
  2015-05-23  0:36                         ` Joe Perches
  0 siblings, 1 reply; 54+ messages in thread
From: Drokin, Oleg @ 2015-05-23  0:25 UTC (permalink / raw)
  To: Joe Perches
  Cc: Julia Lawall, Michael Shuey, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>


On May 22, 2015, at 8:18 PM, Joe Perches wrote:
>>>> I wonder what is more clear about that in your opinion ve
>>>> lustre_error/lustre_debug?
>>> 
>>> The fact that you have to explain this shows that it's
>>> at least misleading unless you completely understand the
>>> code.
>> 
>> Or you know, you might take the function name at the face value
>> and assume that CERROR means it's an error and CDEBUG means it's a debug message?
> 
> Maybe, but I think that it'd be better if the mechanism
> it uses was more plainly named something like lustre_log.

While the idea seems good, the biggest obstacle here is that
there's already a thing called lustre log (llog for short too) -
it's kind of a distributed journal of operations.

Is there a different synonym, I wonder?

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-23  0:25                       ` Drokin, Oleg
@ 2015-05-23  0:36                         ` Joe Perches
  2015-05-23  3:13                           ` [HPDD-discuss] " Patrick Farrell
  0 siblings, 1 reply; 54+ messages in thread
From: Joe Perches @ 2015-05-23  0:36 UTC (permalink / raw)
  To: Drokin, Oleg
  Cc: Julia Lawall, Michael Shuey, <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>

On Sat, 2015-05-23 at 00:25 +0000, Drokin, Oleg wrote:
> On May 22, 2015, at 8:18 PM, Joe Perches wrote:
> >>>> I wonder what is more clear about that in your opinion ve
> >>>> lustre_error/lustre_debug?
> >>> 
> >>> The fact that you have to explain this shows that it's
> >>> at least misleading unless you completely understand the
> >>> code.
> >> 
> >> Or you know, you might take the function name at the face value
> >> and assume that CERROR means it's an error and CDEBUG means it's a debug message?
> > 
> > Maybe, but I think that it'd be better if the mechanism
> > it uses was more plainly named something like lustre_log.
> 
> While the idea seems good, the biggest obstacle here is such that
> there's already a thing called lustre log (llog for short too) -
> it's kind of a distributed journal of operations.
> 
> Its there a different synonym, I wonder?

Maybe: lustre_printk, lustre_logmsg, lustre_output




^ permalink raw reply	[flat|nested] 54+ messages in thread

* RE: [HPDD-discuss] [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes
  2015-05-23  0:36                         ` Joe Perches
@ 2015-05-23  3:13                           ` Patrick Farrell
  0 siblings, 0 replies; 54+ messages in thread
From: Patrick Farrell @ 2015-05-23  3:13 UTC (permalink / raw)
  To: Joe Perches, Drokin, Oleg
  Cc: <devel@driverdev.osuosl.org>,
	<gregkh@linuxfoundation.org>,
	<kernel-janitors@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>, Julia Lawall,
	<HPDD-discuss@ml01.01.org>,
	<lustre-devel@lists.lustre.org>

Since it is not actually doing a printk - at least, not necessarily - I like lustre_logmsg.  lustre_output seems too vague.

- Patrick
________________________________________
From: HPDD-discuss [hpdd-discuss-bounces@lists.01.org] on behalf of Joe Perches [joe@perches.com]
Sent: Friday, May 22, 2015 7:36 PM
To: Drokin, Oleg
Cc: <devel@driverdev.osuosl.org>; <gregkh@linuxfoundation.org>; <kernel-janitors@vger.kernel.org>; <linux-kernel@vger.kernel.org>; Julia Lawall; <HPDD-discuss@ml01.01.org>; <lustre-devel@lists.lustre.org>
Subject: Re: [HPDD-discuss] [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes

On Sat, 2015-05-23 at 00:25 +0000, Drokin, Oleg wrote:
> On May 22, 2015, at 8:18 PM, Joe Perches wrote:
> >>>> I wonder what is more clear about that in your opinion ve
> >>>> lustre_error/lustre_debug?
> >>>
> >>> The fact that you have to explain this shows that it's
> >>> at least misleading unless you completely understand the
> >>> code.
> >>
> >> Or you know, you might take the function name at the face value
> >> and assume that CERROR means it's an error and CDEBUG means it's a debug message?
> >
> > Maybe, but I think that it'd be better if the mechanism
> > it uses was more plainly named something like lustre_log.
>
> While the idea seems good, the biggest obstacle here is such that
> there's already a thing called lustre log (llog for short too) -
> it's kind of a distributed journal of operations.
>
> Its there a different synonym, I wonder?

Maybe: lustre_printk, lustre_logmsg, lustre_output



_______________________________________________
HPDD-discuss mailing list
HPDD-discuss@lists.01.org
https://lists.01.org/mailman/listinfo/hpdd-discuss

^ permalink raw reply	[flat|nested] 54+ messages in thread

* [PATCH] checkpatch: Categorize some long line length checks
  2015-05-21 21:00   ` Joe Perches
  2015-05-21 21:29     ` Julia Lawall
       [not found]     ` <CABqvwjs=S1VrCHe7M4JnDikGBVN3yqjNMrtSA=UvftGmxSasRQ@mail.gmail.com>
@ 2015-05-23  6:28     ` Joe Perches
  2015-05-23 11:32       ` Julia Lawall
  2 siblings, 1 reply; 54+ messages in thread
From: Joe Perches @ 2015-05-23  6:28 UTC (permalink / raw)
  To: Andrew Morton
  Cc: oleg.drokin, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-deve, Dan Carpenter, Mike Shuey

Many lines of code extend beyond the maximum line length.
Some of these are possibly justified by use type.

For instance:

structure definitions where comments are added per member like

struct foo {
	type member;		/* some long description */
}

And lines that don't fit the typical logging message style
where a string constant is used like:

	SOME_MACRO(args, "Some long string");

Categorize these long line types so that checkpatch can use
a command-line --ignore=<type> option to avoid emitting some
long line warnings.

Comment the code a bit better too.

Signed-off-by: Joe Perches <joe@perches.com>
---
 scripts/checkpatch.pl | 54 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 44 insertions(+), 10 deletions(-)

diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 89b1df4..99ce3f4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2510,16 +2510,50 @@ sub process {
 # check we are in a valid source file if not then ignore this hunk
 		next if ($realfile !~ /\.(h|c|s|S|pl|sh|dtsi|dts)$/);
 
-#line length limit
-		if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
-		    $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
-		    !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?$String\s*(?:|,|\)\s*;)\s*$/ ||
-		      $line =~ /^\+\s*$String\s*(?:\s*|,|\)\s*;)\s*$/ ||
-		      $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) &&
-		    $length > $max_line_length)
-		{
-			WARN("LONG_LINE",
-			     "line over $max_line_length characters\n" . $herecurr);
+# line length limit (with some exclusions)
+#
+# There are 3 different line length message types:
+# LONG_LINE_COMMENT	a comment starts before but extends beyond length
+# LONG_LINE_STRING	a string starts before but extends beyond length
+# LONG_LINE		all other lines longer than $max_line_length
+#
+# if LONG_LINE is ignored, the other 2 types are also ignored
+#
+# LONG_LINE has a few types of lines that may extend beyond $max_line_length
+#	kernel-doc arguments
+#	logging functions like pr_info that end in a string
+#	lines with a single string
+#	#defines that are a single string
+
+		if ($length > $max_line_length) {
+			my $msg_type = "";
+
+			# comment starts before $max_line_length
+			if ($line =~ /([\s$;]+)$/ &&
+			    length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
+				$msg_type = "LONG_LINE_COMMENT"
+
+			# quoted string starts before $max_line_length
+			} elsif ($sline =~ /\s*($String(?:\s*(?:\\|,\s*|\)\s*;\s*))?)$/ &&
+				 length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
+				$msg_type = "LONG_LINE_STRING"
+
+			# general long lines
+				# exclude kernel-doc argument lines
+			} elsif ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
+				 $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
+				# exclude logging functions that end in a string
+				 !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?$String\s*(?:|,|\)\s*;)\s*$/ ||
+				# exclude lines with only strings
+				   $line =~ /^\+\s*$String\s*(?:\s*|,|\)\s*;)\s*$/ ||
+				# exclude #defines with only strings
+				   $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/)) {
+				$msg_type = "LONG_LINE";
+			}
+			if ($msg_type ne "" && show_type("LONG_LINE")) {
+				WARN($msg_type,
+				     "line over $max_line_length characters\n" . $herecurr);
+			}
 		}
 
 # check for adding lines without a newline.



^ permalink raw reply related	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 00/13] staging: lustre: lnet: code cleanups
       [not found]   ` <CABqvwjsJXJYciMDHqy9vEdL6dGLhx3+Pi0_ro6192Z5SGR9Q_w@mail.gmail.com>
@ 2015-05-23 10:14     ` Dan Carpenter
  2015-05-23 12:09       ` Michael Shuey
  0 siblings, 1 reply; 54+ messages in thread
From: Dan Carpenter @ 2015-05-23 10:14 UTC (permalink / raw)
  To: Michael Shuey
  Cc: Drokin, Oleg, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-devel

We would have applied the v3 patchset but now I don't know because we're
up to v5.  We can't apply v5 because there are problems with it.  No
one responded to v3 so Greg still might apply it or he might find these
email threads too scrambled and delete everything and ask for a resend.

It's pretty messed up so just wait for Greg to get to it before sending
more patches?

Basically you should only send patches which you assume will be applied.
If no one responds after 3 days then probably that means everyone from
the peanut gallery (Me, Sudip, Joe, the lustre devs), we don't have an
issue.  Then Greg does the last review (2-3 weeks later perhaps).  But
if it makes it past all the other reviews then generally Greg also will
be ok with it.

Greg applies patches in first come, first applied order.  If they don't
apply then you have to redo it.  He doesn't invest a lot of time into
figuring out why.  So you have to coordinate with the other devs, it's
up to you how you do that.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH] checkpatch: Categorize some long line length checks
  2015-05-23  6:28     ` [PATCH] checkpatch: Categorize some long line length checks Joe Perches
@ 2015-05-23 11:32       ` Julia Lawall
  2015-05-23 17:26         ` Joe Perches
  0 siblings, 1 reply; 54+ messages in thread
From: Julia Lawall @ 2015-05-23 11:32 UTC (permalink / raw)
  To: Joe Perches
  Cc: Andrew Morton, oleg.drokin, devel, gregkh, kernel-janitors,
	linux-kernel, HPDD-discuss, lustre-deve, Dan Carpenter,
	Mike Shuey



On Fri, 22 May 2015, Joe Perches wrote:

> Many lines of code extend beyond the maximum line length.
> Some of these are possibly justified by use type.
>
> For instance:
>
> structure definitions where comments are added per member like
>
> struct foo {
> 	type member;		/* some long description */

I'm not super fond of the comment one.  Perhaps people could express
themselves more concisely, or put the details elsewhere?

julia

> }
>
> And lines that don't fit the typical logging message style
> where a string constant is used like:
>
> 	SOME_MACRO(args, "Some long string");
>
> Categorize these long line types so that checkpatch can use
> a command-line --ignore=<type> option to avoid emitting some
> long line warnings.
>
> Comment the code a bit better too.
>
> Signed-off-by: Joe Perches <joe@perches.com>
> ---
>  scripts/checkpatch.pl | 54 +++++++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 44 insertions(+), 10 deletions(-)
>
> diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
> index 89b1df4..99ce3f4 100755
> --- a/scripts/checkpatch.pl
> +++ b/scripts/checkpatch.pl
> @@ -2510,16 +2510,50 @@ sub process {
>  # check we are in a valid source file if not then ignore this hunk
>  		next if ($realfile !~ /\.(h|c|s|S|pl|sh|dtsi|dts)$/);
>
> -#line length limit
> -		if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
> -		    $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
> -		    !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?$String\s*(?:|,|\)\s*;)\s*$/ ||
> -		      $line =~ /^\+\s*$String\s*(?:\s*|,|\)\s*;)\s*$/ ||
> -		      $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) &&
> -		    $length > $max_line_length)
> -		{
> -			WARN("LONG_LINE",
> -			     "line over $max_line_length characters\n" . $herecurr);
> +# line length limit (with some exclusions)
> +#
> +# There are 3 different line length message types:
> +# LONG_LINE_COMMENT	a comment starts before but extends beyond length
> +# LONG_LINE_STRING	a string starts before but extends beyond length
> +# LONG_LINE		all other lines longer than $max_line_length
> +#
> +# if LONG_LINE is ignored, the other 2 types are also ignored
> +#
> +# LONG_LINE has a few types of lines that may extend beyond $max_line_length
> +#	kernel-doc arguments
> +#	logging functions like pr_info that end in a string
> +#	lines with a single string
> +#	#defines that are a single string
> +
> +		if ($length > $max_line_length) {
> +			my $msg_type = "";
> +
> +			# comment starts before $max_line_length
> +			if ($line =~ /([\s$;]+)$/ &&
> +			    length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
> +				$msg_type = "LONG_LINE_COMMENT"
> +
> +			# quoted string starts before $max_line_length
> +			} elsif ($sline =~ /\s*($String(?:\s*(?:\\|,\s*|\)\s*;\s*))?)$/ &&
> +				 length(expand_tabs(substr($line, 1, length($line) - length($1) - 1))) <= $max_line_length) {
> +				$msg_type = "LONG_LINE_STRING"
> +
> +			# general long lines
> +				# exclude kernel-doc argument lines
> +			} elsif ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
> +				 $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
> +				# exclude logging functions that end in a string
> +				 !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?$String\s*(?:|,|\)\s*;)\s*$/ ||
> +				# exclude lines with only strings
> +				   $line =~ /^\+\s*$String\s*(?:\s*|,|\)\s*;)\s*$/ ||
> +				# exclude #defines with only strings
> +				   $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/)) {
> +				$msg_type = "LONG_LINE";
> +			}
> +			if ($msg_type ne "" && show_type("LONG_LINE")) {
> +				WARN($msg_type,
> +				     "line over $max_line_length characters\n" . $herecurr);
> +			}
>  		}
>
>  # check for adding lines without a newline.
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe kernel-janitors" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 00/13] staging: lustre: lnet: code cleanups
  2015-05-23 10:14     ` Dan Carpenter
@ 2015-05-23 12:09       ` Michael Shuey
  2015-05-23 12:51         ` Sudip Mukherjee
  0 siblings, 1 reply; 54+ messages in thread
From: Michael Shuey @ 2015-05-23 12:09 UTC (permalink / raw)
  To: Dan Carpenter
  Cc: Drokin, Oleg, devel, gregkh, kernel-janitors, linux-kernel,
	HPDD-discuss, lustre-devel

Hm, that's unfortunate - but my own fault for lack of proper
etiquette.  I'll give this a week or two to settle, and build up
patches against other parts of lustre in the meantime.

BTW, you keep mentioning a v5 that I sent.  Where is that, exactly?
The last round of patches I sent I've kept labeled as "PATCH v4", and
I only hit git send-email once.  Could you forward me something from
this v5 series, so I could see if anything is amiss on my end?
--
Mike Shuey


On Sat, May 23, 2015 at 6:14 AM, Dan Carpenter <dan.carpenter@oracle.com> wrote:
> We would have applied the v3 patchset but now I don't know because we're
> up to v5.  We can't apply v5 because there are problems with it.  No
> one responded to v3 so Greg still might apply it or he might find these
> email threads too scrambled and delete everything and ask for a resend.
>
> It's pretty messed up so just wait for Greg to get to it before sending
> more patches?
>
> Basically you should only send patches which you assume will be applied.
> If no one responds after 3 days then probably that means everyone from
> the peanut gallery (Me, Sudip, Joe, the lustre devs), we don't have an
> issue.  Then Greg does the last review (2-3 weeks later perhaps).  But
> if it makes it past all the other reviews then generally Greg also will
> be ok with it.
>
> Greg applies patches in first come, first applied order.  If they don't
> apply then you have to redo it.  He doesn't invest a lot of time into
> figuring out why.  So you have to coordinate with the other devs, it's
> up to you how you do that.
>
> regards,
> dan carpenter
>

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 00/13] staging: lustre: lnet: code cleanups
  2015-05-23 12:09       ` Michael Shuey
@ 2015-05-23 12:51         ` Sudip Mukherjee
  2015-05-23 14:05           ` Michael Shuey
  0 siblings, 1 reply; 54+ messages in thread
From: Sudip Mukherjee @ 2015-05-23 12:51 UTC (permalink / raw)
  To: Michael Shuey
  Cc: Dan Carpenter, Drokin, Oleg, devel, gregkh, kernel-janitors,
	linux-kernel, HPDD-discuss, lustre-devel

On Sat, May 23, 2015 at 08:09:54AM -0400, Michael Shuey wrote:
> BTW, you keep mentioning a v5 that I sent.  Where is that, exactly?
> The last round of patches I sent I've kept labeled as "PATCH v4", and
> I only hit git send-email once.  Could you forward me something from
> this v5 series, so I could see if anything is amiss on my end?

I think it was not a v5. But what happened is in your series some
of the patches were marked as v4 and some were not having any version
so it appeared like a fresh series again. Like 1/13,2/13,3/13 has v4
but the others do not have a version.

regards
sudip

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 00/13] staging: lustre: lnet: code cleanups
  2015-05-23 12:51         ` Sudip Mukherjee
@ 2015-05-23 14:05           ` Michael Shuey
  2015-05-31  2:24             ` Greg KH
  0 siblings, 1 reply; 54+ messages in thread
From: Michael Shuey @ 2015-05-23 14:05 UTC (permalink / raw)
  To: Sudip Mukherjee
  Cc: Dan Carpenter, Drokin, Oleg, devel, gregkh, kernel-janitors,
	linux-kernel, HPDD-discuss, lustre-devel

Ah - that explains it.  I added additional files to the series, but
simply re-applied the original round of patches.  They would've not
been tagged as v4, while the patches pertaining to the newly-modified
files had v4 in the subject.

I'll be more thorough in future patch revisions.  Thanks for the explanation.
--
Mike Shuey


On Sat, May 23, 2015 at 8:39 AM, Sudip Mukherjee
<sudipm.mukherjee@gmail.com> wrote:
> On Sat, May 23, 2015 at 08:09:54AM -0400, Michael Shuey wrote:
>> BTW, you keep mentioning a v5 that I sent.  Where is that, exactly?
>> The last round of patches I sent I've kept labeled as "PATCH v4", and
>> I only hit git send-email once.  Could you forward me something from
>> this v5 series, so I could see if anything is amiss on my end?
>
> I think it was not a v5. But what happened is in your series some
> of the patches were marked as v4 and some were not having any version
> so it appeared like a fresh series again. Like 1/13,2/13,3/13 has v4
> but the others do not have a version.
>
> regards
> sudip

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH] checkpatch: Categorize some long line length checks
  2015-05-23 11:32       ` Julia Lawall
@ 2015-05-23 17:26         ` Joe Perches
  2015-05-23 18:07           ` Dan Carpenter
  0 siblings, 1 reply; 54+ messages in thread
From: Joe Perches @ 2015-05-23 17:26 UTC (permalink / raw)
  To: Julia Lawall
  Cc: Andrew Morton, oleg.drokin, devel, gregkh, kernel-janitors,
	linux-kernel, HPDD-discuss, lustre-deve, Dan Carpenter,
	Mike Shuey

On Sat, 2015-05-23 at 13:32 +0200, Julia Lawall wrote:
> On Fri, 22 May 2015, Joe Perches wrote:
> > Many lines of code extend beyond the maximum line length.
> > Some of these are possibly justified by use type.
> >
> > For instance:
> >
> > structure definitions where comments are added per member like
> >
> > struct foo {
> > 	type member;		/* some long description */
> 
> I'm not super fond of the comment one.  Perhaps people could express
> themselves more concisely, or put the details elsewhere?

Concision is good, straining for brevity or bad
formatting isn't.

I've seen a lot of ugly patches lately to "fix"
code like this by making it worse.

By default, there is still a long_line warning for
this style.  It arguably could be appropriate to
keep some lines like this and this makes it easy
to tell people "add --ignore=<type>".

This patch shouldn't be applied right now anyway.

I think the idea is OK, but this implementation
could be improved and clarified by moving the
current exclusions before the classifications.

Anyone else have an opinion?

I'll send a V2 later unless there are more comments.




^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH] checkpatch: Categorize some long line length checks
  2015-05-23 17:26         ` Joe Perches
@ 2015-05-23 18:07           ` Dan Carpenter
  2015-05-23 18:13             ` Joe Perches
  0 siblings, 1 reply; 54+ messages in thread
From: Dan Carpenter @ 2015-05-23 18:07 UTC (permalink / raw)
  To: Joe Perches
  Cc: Julia Lawall, Andrew Morton, oleg.drokin, devel, gregkh,
	kernel-janitors, linux-kernel, HPDD-discuss, lustre-deve,
	Mike Shuey

I feel like the lustre headers could be fit into 80 characters without
losing very much.

No one uses the complicated options on checkpatch anyway, they just grep
away the warnings they don't like.  Newbies especially don't use them.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH] checkpatch: Categorize some long line length checks
  2015-05-23 18:07           ` Dan Carpenter
@ 2015-05-23 18:13             ` Joe Perches
  2015-05-23 18:21               ` Dan Carpenter
  0 siblings, 1 reply; 54+ messages in thread
From: Joe Perches @ 2015-05-23 18:13 UTC (permalink / raw)
  To: Dan Carpenter
  Cc: Julia Lawall, Andrew Morton, oleg.drokin, devel, gregkh,
	kernel-janitors, linux-kernel, HPDD-discuss, lustre-deve,
	Mike Shuey

On Sat, 2015-05-23 at 21:07 +0300, Dan Carpenter wrote:
> I feel like the lustre headers could be fit into 80 characters without
> losing very much.

Maybe.

> No one uses the complicated options on checkpatch anyway, they just grep
> away the warnings they don't like.

That'd be false.  Other projects like u-boot do.

> Newbies especially don't use them.

True, but capability exclusion doesn't seem the
right path.


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH] checkpatch: Categorize some long line length checks
  2015-05-23 18:13             ` Joe Perches
@ 2015-05-23 18:21               ` Dan Carpenter
  0 siblings, 0 replies; 54+ messages in thread
From: Dan Carpenter @ 2015-05-23 18:21 UTC (permalink / raw)
  To: Joe Perches
  Cc: Julia Lawall, Andrew Morton, oleg.drokin, devel, gregkh,
	kernel-janitors, linux-kernel, HPDD-discuss, lustre-deve,
	Mike Shuey

On Sat, May 23, 2015 at 11:13:31AM -0700, Joe Perches wrote:
> On Sat, 2015-05-23 at 21:07 +0300, Dan Carpenter wrote:
> > No one uses the complicated options on checkpatch anyway, they just grep
> > away the warnings they don't like.
> 
> That'd be false.  Other projects like u-boot do.

Ah.  Ok.

regards,
dan carpenter


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [PATCH v4 00/13] staging: lustre: lnet: code cleanups
  2015-05-23 14:05           ` Michael Shuey
@ 2015-05-31  2:24             ` Greg KH
  0 siblings, 0 replies; 54+ messages in thread
From: Greg KH @ 2015-05-31  2:24 UTC (permalink / raw)
  To: Michael Shuey
  Cc: Sudip Mukherjee, devel, kernel-janitors, linux-kernel,
	Drokin, Oleg, HPDD-discuss, Dan Carpenter, lustre-devel

A: No.
Q: Should I include quotations after my reply?

http://daringfireball.net/2007/07/on_top

On Sat, May 23, 2015 at 10:05:27AM -0400, Michael Shuey wrote:
> Ah - that explains it.  I added additional files to the series, but
> simply re-applied the original round of patches.  They would've not
> been tagged as v4, while the patches pertaining to the newly-modified
> files had v4 in the subject.

The way you sent these is a mess, I can't sort by subject and apply them
properly.

Either put the v4 at the front, for all patches, like this:
	[PATCH v4 01/13]
or at the end of the number:
	[PATCH 01/13 v4]
so I can sort them.

Please fix up and resend this series, I've dropped it from my queue.

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 54+ messages in thread

end of thread, other threads:[~2015-05-31  2:24 UTC | newest]

Thread overview: 54+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-05-21 19:50 [PATCH v4 00/13] staging: lustre: lnet: code cleanups Mike Shuey
2015-05-21 19:50 ` [PATCH v4 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations Mike Shuey
2015-05-21 19:50 ` [PATCH v4 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks Mike Shuey
2015-05-21 19:50 ` [PATCH v4 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync Mike Shuey
2015-05-21 19:50 ` [PATCH v4 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals Mike Shuey
2015-05-21 19:50 ` [PATCH v4 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace Mike Shuey
2015-05-22 13:03   ` Dan Carpenter
2015-05-21 19:50 ` [PATCH v4 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro Mike Shuey
2015-05-21 19:50 ` [PATCH v4 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue Mike Shuey
2015-05-21 19:50 ` [PATCH v4 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes Mike Shuey
2015-05-21 21:00   ` Joe Perches
2015-05-21 21:29     ` Julia Lawall
     [not found]       ` <CABqvwjuKypiacf3336j-q450FGKaO4wh0Ld7PF8MGz0VPsYBbw@mail.gmail.com>
2015-05-22  2:46         ` Joe Perches
2015-05-22  5:06         ` Julia Lawall
2015-05-22  8:08           ` Drokin, Oleg
2015-05-22 15:42             ` Joe Perches
2015-05-22 21:16               ` Drokin, Oleg
2015-05-22 23:57                 ` Joe Perches
2015-05-23  0:07                   ` Drokin, Oleg
2015-05-23  0:18                     ` Joe Perches
2015-05-23  0:25                       ` Drokin, Oleg
2015-05-23  0:36                         ` Joe Perches
2015-05-23  3:13                           ` [HPDD-discuss] " Patrick Farrell
     [not found]     ` <CABqvwjs=S1VrCHe7M4JnDikGBVN3yqjNMrtSA=UvftGmxSasRQ@mail.gmail.com>
2015-05-22  3:06       ` Joe Perches
2015-05-22  8:04       ` Drokin, Oleg
2015-05-22 10:00         ` Julia Lawall
2015-05-23  6:28     ` [PATCH] checkpatch: Categorize some long line length checks Joe Perches
2015-05-23 11:32       ` Julia Lawall
2015-05-23 17:26         ` Joe Perches
2015-05-23 18:07           ` Dan Carpenter
2015-05-23 18:13             ` Joe Perches
2015-05-23 18:21               ` Dan Carpenter
2015-05-21 19:50 ` [PATCH v4 11/13] staging: lnet: o2iblnd: checkpatch.pl fixes Mike Shuey
2015-05-22 10:49   ` Dan Carpenter
2015-05-21 19:50 ` [PATCH v4 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups Mike Shuey
2015-05-21 19:50 ` [PATCH v4 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes Mike Shuey
2015-05-21 19:50 ` [PATCH 01/13] staging: lustre: lnet: lnet: code cleanups - variable declarations Mike Shuey
2015-05-21 19:50 ` [PATCH 02/13] staging: lustre: lnet: dead code - remove lnet_fini_locks Mike Shuey
2015-05-21 19:50 ` [PATCH 03/13] staging: lustre: lnet: dead code - remove LNetSetAsync Mike Shuey
2015-05-21 19:50 ` [PATCH 04/13] staging: lustre: lnet: lnet: Module is LNet, not Portals Mike Shuey
2015-05-21 19:50 ` [PATCH 05/13] staging: lustre: lnet: o2iblnd: code cleanup - align whitespace Mike Shuey
2015-05-21 19:50 ` [PATCH 08/13] staging: lustre: lnet: remove LNET_MUTEX_LOCK macro Mike Shuey
2015-05-21 19:50 ` [PATCH 09/13] staging: lustre: lnet: lnet: remove dead code, fix checkpatch.pl issue Mike Shuey
2015-05-21 19:50 ` [PATCH 10/13] staging: lustre: lnet: lnet: checkpatch.pl fixes Mike Shuey
2015-05-21 19:50 ` [PATCH 11/13] staging: lnet: o2iblnd: " Mike Shuey
2015-05-21 19:50 ` [PATCH 12/13] staging: lustre: lnet: socklnd: checkpatch.pl cleanups Mike Shuey
2015-05-21 19:50 ` [PATCH 13/13] staging: lustre: lnet: selftest: checkpatch.pl fixes Mike Shuey
2015-05-22 10:55   ` Dan Carpenter
2015-05-22  9:21 ` [PATCH v4 00/13] staging: lustre: lnet: code cleanups Dan Carpenter
     [not found]   ` <CABqvwjsJXJYciMDHqy9vEdL6dGLhx3+Pi0_ro6192Z5SGR9Q_w@mail.gmail.com>
2015-05-23 10:14     ` Dan Carpenter
2015-05-23 12:09       ` Michael Shuey
2015-05-23 12:51         ` Sudip Mukherjee
2015-05-23 14:05           ` Michael Shuey
2015-05-31  2:24             ` Greg KH

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).