From: Ying Xue <ying.xue@windriver.com>
To: <davem@davemloft.net>
Cc: jon.maloy@ericsson.com, Paul.Gortmaker@windriver.com,
	tipc-discussion@lists.sourceforge.net, netdev@vger.kernel.org
Subject: [PATCH net-next 11/11] tipc: fix race in disc create/delete
Date: Mon, 21 Apr 2014 10:55:52 +0800
Message-ID: <1398048952-7825-12-git-send-email-ying.xue@windriver.com>
In-Reply-To: <1398048952-7825-1-git-send-email-ying.xue@windriver.com>

Commit a21a584d6720ce349b05795b9bcfab3de8e58419 ("tipc: fix neighbor
detection problem after hw address change") introduces a race condition
between tipc_disc_delete() and tipc_disc_add_dest()/tipc_disc_remove_dest().
When the discovery request structure is freed, a stray pointer to it is
left behind in the bearer structure, so TIPC may later dereference the
structure after it has been freed.

To fix the issue, the reset path for the discovery request handler is
reworked: the request handler and its request buffer are simply reset
in place instead of being freed, reallocated and reinitialized. Since
the request pointer now stays valid for the lifetime of the bearer, and
the request's lock is held while the handler is reset, the race can no
longer occur.
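
After the change, both paths serialize on the request's lock, roughly
as follows (a condensed sketch of the code in the diff below):

  tipc_disc_reset(b_ptr)               tipc_disc_add_dest(req)
  ----------------------               -----------------------
  spin_lock_bh(&req->lock)             spin_lock_bh(&req->lock)
  tipc_disc_init_msg(req->buf, ...)      /* blocks until the reset
  ...                                     * has completed */
  spin_unlock_bh(&req->lock)           req->num_nodes++
                                       spin_unlock_bh(&req->lock)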

Reported-by: Erik Hugne <erik.hugne@ericsson.com>
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Tested-by: Erik Hugne <erik.hugne@ericsson.com>
---
 net/tipc/bearer.c   |    3 +--
 net/tipc/discover.c |   54 ++++++++++++++++++++++++++++++++++-----------------
 net/tipc/discover.h |    1 +
 3 files changed, 38 insertions(+), 20 deletions(-)

diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3abd970..f3259d4 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -365,9 +365,8 @@ restart:
 static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
 {
 	pr_info("Resetting bearer <%s>\n", b_ptr->name);
-	tipc_disc_delete(b_ptr->link_req);
 	tipc_link_reset_list(b_ptr->identity);
-	tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
+	tipc_disc_reset(b_ptr);
 	return 0;
 }
 
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 3a8f211..ada42e4 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -71,22 +71,19 @@ struct tipc_link_req {
  * @type: message type (request or response)
  * @b_ptr: ptr to bearer issuing message
  */
-static struct sk_buff *tipc_disc_init_msg(u32 type, struct tipc_bearer *b_ptr)
+static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
+			       struct tipc_bearer *b_ptr)
 {
-	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
 	struct tipc_msg *msg;
 	u32 dest_domain = b_ptr->domain;
 
-	if (buf) {
-		msg = buf_msg(buf);
-		tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
-		msg_set_non_seq(msg, 1);
-		msg_set_node_sig(msg, tipc_random);
-		msg_set_dest_domain(msg, dest_domain);
-		msg_set_bc_netid(msg, tipc_net_id);
-		b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
-	}
-	return buf;
+	msg = buf_msg(buf);
+	tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
+	msg_set_non_seq(msg, 1);
+	msg_set_node_sig(msg, tipc_random);
+	msg_set_dest_domain(msg, dest_domain);
+	msg_set_bc_netid(msg, tipc_net_id);
+	b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
 }
 
 /**
@@ -241,8 +238,9 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 	link_fully_up = link_working_working(link);
 
 	if ((type == DSC_REQ_MSG) && !link_fully_up) {
-		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, b_ptr);
+		rbuf = tipc_buf_acquire(INT_H_SIZE);
 		if (rbuf) {
+			tipc_disc_init_msg(rbuf, DSC_RESP_MSG, b_ptr);
 			tipc_bearer_send(b_ptr->identity, rbuf, &media_addr);
 			kfree_skb(rbuf);
 		}
@@ -349,12 +347,13 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
 	if (!req)
 		return -ENOMEM;
 
-	req->buf = tipc_disc_init_msg(DSC_REQ_MSG, b_ptr);
-	if (!req->buf) {
-		kfree(req);
-		return -ENOMSG;
-	}
+	req->buf = tipc_buf_acquire(INT_H_SIZE);
+	if (!req->buf) {
+		kfree(req);
+		return -ENOMEM;
+	}
 
+	tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
 	memcpy(&req->dest, dest, sizeof(*dest));
 	req->bearer_id = b_ptr->identity;
 	req->domain = b_ptr->domain;
@@ -379,3 +378,22 @@ void tipc_disc_delete(struct tipc_link_req *req)
 	kfree_skb(req->buf);
 	kfree(req);
 }
+
+/**
+ * tipc_disc_reset - reset object to send periodic link setup requests
+ * @b_ptr: ptr to bearer issuing requests
+ */
+void tipc_disc_reset(struct tipc_bearer *b_ptr)
+{
+	struct tipc_link_req *req = b_ptr->link_req;
+
+	spin_lock_bh(&req->lock);
+	tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+	req->bearer_id = b_ptr->identity;
+	req->domain = b_ptr->domain;
+	req->num_nodes = 0;
+	req->timer_intv = TIPC_LINK_REQ_INIT;
+	k_start_timer(&req->timer, req->timer_intv);
+	tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+	spin_unlock_bh(&req->lock);
+}
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 07f3472..515b573 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -41,6 +41,7 @@ struct tipc_link_req;
 
 int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
 void tipc_disc_delete(struct tipc_link_req *req);
+void tipc_disc_reset(struct tipc_bearer *b_ptr);
 void tipc_disc_add_dest(struct tipc_link_req *req);
 void tipc_disc_remove_dest(struct tipc_link_req *req);
 void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
-- 
1.7.9.5

