* [PATCH net-next 1/6] net/smc: cancel tx worker in case of socket aborts
From: Ursula Braun @ 2018-01-25 10:15 UTC
To: davem; +Cc: netdev, linux-s390, jwi, schwidefsky, heiko.carstens, raspl, ubraun
If an SMC socket is aborted, the tx worker should be cancelled.
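The ordering matters here: a minimal sketch of the locking pattern the diff
below introduces, assuming (as smc_tx_work() elsewhere in this series
suggests) that the tx worker takes the sock lock itself, so a synchronous
cancel must not run under that lock:

    /* sketch, not the exact kernel code: cancel_delayed_work_sync()
     * waits for a running worker, and the tx worker calls lock_sock(),
     * so cancelling while holding the sock lock could deadlock
     */
    release_sock(sk);                             /* drop the sock lock */
    cancel_delayed_work_sync(&smc->conn.tx_work); /* wait for the worker */
    lock_sock(sk);                                /* re-acquire before state changes */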
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
---
net/smc/smc_close.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 1468a2a3cdf4..6de909612bd0 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -107,6 +107,9 @@ static void smc_close_active_abort(struct smc_sock *smc)
case SMC_INIT:
case SMC_ACTIVE:
sk->sk_state = SMC_PEERABORTWAIT;
+ release_sock(sk);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
@@ -116,6 +119,9 @@ static void smc_close_active_abort(struct smc_sock *smc)
sk->sk_state = SMC_PEERABORTWAIT;
else
sk->sk_state = SMC_CLOSED;
+ release_sock(sk);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
@@ -249,9 +255,6 @@ int smc_close_active(struct smc_sock *smc)
/* peer sending PeerConnectionClosed will cause transition */
break;
case SMC_PROCESSABORT:
- release_sock(sk);
- cancel_delayed_work_sync(&conn->tx_work);
- lock_sock(sk);
smc_close_abort(conn);
sk->sk_state = SMC_CLOSED;
break;
@@ -327,6 +330,9 @@ static void smc_close_passive_work(struct work_struct *work)
rxflags = &conn->local_rx_ctrl.conn_state_flags;
if (rxflags->peer_conn_abort) {
smc_close_passive_abort_received(smc);
+ release_sock(&smc->sk);
+ cancel_delayed_work_sync(&conn->tx_work);
+ lock_sock(&smc->sk);
goto wakeup;
}
--
2.13.5
* [PATCH net-next 2/6] net/smc: handle state SMC_PEERFINCLOSEWAIT correctly
From: Ursula Braun @ 2018-01-25 10:15 UTC
To: davem; +Cc: netdev, linux-s390, jwi, schwidefsky, heiko.carstens, raspl, ubraun
A state transition from closing state SMC_PEERFINCLOSEWAIT to closing
state SMC_APPFINCLOSEWAIT is not allowed: once a closing indication
from the peer has been received, the socket reaches state SMC_CLOSED.
Receiving a peer_conn_abort merely moves the socket to one of the
states SMC_PROCESSABORT or SMC_CLOSED; sending a peer_conn_abort
occurs in smc_close_active() for state SMC_PROCESSABORT only.
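Sketched as a comment, the transitions this patch enforces (an assumption
derived from the description above, not a complete state table):

    /* closing transitions relevant to this patch:
     *
     *   SMC_PEERFINCLOSEWAIT -- close indication received --> SMC_CLOSED
     *   (never to SMC_APPFINCLOSEWAIT)
     *
     *   peer_conn_abort received --> SMC_PROCESSABORT or SMC_CLOSED
     *   peer_conn_abort sent only by smc_close_active() in SMC_PROCESSABORT
     */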
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
---
net/smc/smc_close.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 6de909612bd0..3dc109f5db56 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -280,7 +280,6 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
case SMC_APPFINCLOSEWAIT:
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
- smc_close_abort(&smc->conn);
sk->sk_state = SMC_PROCESSABORT;
break;
case SMC_PEERCLOSEWAIT1:
@@ -288,7 +287,6 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
if (txflags->peer_done_writing &&
!smc_close_sent_any_close(&smc->conn)) {
/* just shutdown, but not yet closed locally */
- smc_close_abort(&smc->conn);
sk->sk_state = SMC_PROCESSABORT;
} else {
sk->sk_state = SMC_CLOSED;
@@ -354,7 +352,6 @@ static void smc_close_passive_work(struct work_struct *work)
/* fall through */
/* to check for closing */
case SMC_PEERCLOSEWAIT2:
- case SMC_PEERFINCLOSEWAIT:
if (!smc_cdc_rxed_any_close(conn))
break;
if (sock_flag(sk, SOCK_DEAD) &&
@@ -366,6 +363,10 @@ static void smc_close_passive_work(struct work_struct *work)
sk->sk_state = SMC_APPFINCLOSEWAIT;
}
break;
+ case SMC_PEERFINCLOSEWAIT:
+ if (smc_cdc_rxed_any_close(conn))
+ sk->sk_state = SMC_CLOSED;
+ break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
case SMC_APPFINCLOSEWAIT:
--
2.13.5
* [PATCH net-next 3/6] net/smc: terminate link group for ib_post_send problems
From: Ursula Braun @ 2018-01-25 10:15 UTC
To: davem; +Cc: netdev, linux-s390, jwi, schwidefsky, heiko.carstens, raspl, ubraun
If ib_post_send() fails, terminate all connections of this
link group.
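smc_wr_tx_send() only has the failing link at hand, so the diff below
recovers the owning link group with container_of(); a minimal sketch of
that pattern, assuming the single-link layout (lnk[SMC_SINGLE_LINK]) used
throughout this series:

    /* sketch: the link is embedded in its link group, so the group
     * can be derived from the link pointer alone
     */
    struct smc_link_group *lgr = container_of(link, struct smc_link_group,
                                              lnk[SMC_SINGLE_LINK]);

    smc_lgr_terminate(lgr); /* terminate all connections of this group */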
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
---
net/smc/smc_tx.c | 4 +++-
net/smc/smc_wr.c | 8 +++++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index fea6482233a6..71b7d9f079f0 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -248,8 +248,10 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
peer_rmbe_offset;
rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
- if (rc)
+ if (rc) {
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+ smc_lgr_terminate(lgr);
+ }
return rc;
}
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 5ed94109d1d6..621c65850a18 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -248,8 +248,14 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
pend = container_of(priv, struct smc_wr_tx_pend, priv);
rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
&failed_wr);
- if (rc)
+ if (rc) {
+ struct smc_link_group *lgr =
+ container_of(link, struct smc_link_group,
+ lnk[SMC_SINGLE_LINK]);
+
smc_wr_tx_put_slot(link, priv);
+ smc_lgr_terminate(lgr);
+ }
return rc;
}
--
2.13.5
* [PATCH net-next 4/6] net/smc: do not reuse a linkgroup with setup problems
From: Ursula Braun @ 2018-01-25 10:15 UTC
To: davem; +Cc: netdev, linux-s390, jwi, schwidefsky, heiko.carstens, raspl, ubraun
Once a link group has been created successfully, it stays alive for a
certain time to serve connections that may be created later. If one of
the initialization steps for a new link group fails, the link group
should not be reused by subsequent connections.
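The mechanism is small: a hedged sketch of the unhooking idiom in
smc_lgr_forget() below, assuming the smc_lgr_list fields shown in the diff:

    /* sketch: list_del_init() leaves lgr->list empty, so a later
     * list_empty() check doubles as "was this link group forgotten?"
     * (smc_lgr_free_work() uses exactly that to free it directly)
     */
    spin_lock_bh(&smc_lgr_list.lock);
    if (!list_empty(&lgr->list))
        list_del_init(&lgr->list); /* new connections no longer find it */
    spin_unlock_bh(&smc_lgr_list.lock);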
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
---
net/smc/af_smc.c | 17 +++++++++++++++++
net/smc/smc_core.c | 3 +++
2 files changed, 20 insertions(+)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 05cbcd3a6f60..cf0e11978b66 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -377,6 +377,15 @@ static void smc_link_save_peer_info(struct smc_link *link,
link->peer_mtu = clc->qp_mtu;
}
+static void smc_lgr_forget(struct smc_link_group *lgr)
+{
+ spin_lock_bh(&smc_lgr_list.lock);
+ /* do not use this link group for new connections */
+ if (!list_empty(&lgr->list))
+ list_del_init(&lgr->list);
+ spin_unlock_bh(&smc_lgr_list.lock);
+}
+
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc)
{
@@ -513,6 +522,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
return rc ? rc : local_contact;
decline_rdma_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
smc_conn_free(&smc->conn);
decline_rdma:
@@ -526,6 +537,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
goto out_connected;
out_err_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
smc_conn_free(&smc->conn);
out_err:
@@ -906,6 +919,8 @@ static void smc_listen_work(struct work_struct *work)
return;
decline_rdma_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(new_smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
decline_rdma:
/* RDMA setup failed, switch back to TCP */
@@ -918,6 +933,8 @@ static void smc_listen_work(struct work_struct *work)
goto out_connected;
out_err_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(new_smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
out_err:
newsmcsk->sk_state = SMC_CLOSED;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 94f21116dac5..7406cbb41856 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -128,6 +128,8 @@ static void smc_lgr_free_work(struct work_struct *work)
bool conns;
spin_lock_bh(&smc_lgr_list.lock);
+ if (list_empty(&lgr->list))
+ goto free;
read_lock_bh(&lgr->conns_lock);
conns = RB_EMPTY_ROOT(&lgr->conns_all);
read_unlock_bh(&lgr->conns_lock);
@@ -136,6 +138,7 @@ static void smc_lgr_free_work(struct work_struct *work)
return;
}
list_del_init(&lgr->list); /* remove from smc_lgr_list */
+free:
spin_unlock_bh(&smc_lgr_list.lock);
smc_lgr_free(lgr);
}
--
2.13.5
* [PATCH net-next 5/6] net/smc: wake up wr_reg_wait when terminating a link group
From: Ursula Braun @ 2018-01-25 10:15 UTC
To: davem; +Cc: netdev, linux-s390, jwi, schwidefsky, heiko.carstens, raspl, ubraun
If a new connection with a new rmb is added to a link group, its
memory region is registered. If the link group is terminated, a
pending registration requires a wake up.
This patch also consolidates the setting of the tx_flag
peer_conn_abort in smc_lgr_terminate().
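A simplified sketch of the terminate loop after this patch; the conns_lock
is dropped around schedule_work() (an assumption: the scheduled close
worker may need that lock itself), and the final wake_up() releases a
registration still waiting on wr_reg_wait:

    /* sketch of smc_lgr_terminate() per connection, simplified */
    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
    __smc_lgr_unregister_conn(conn);
    write_unlock_bh(&lgr->conns_lock);
    schedule_work(&conn->close_work); /* run passive close handling */
    write_lock_bh(&lgr->conns_lock);
    /* ... after the loop: */
    wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);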
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
---
net/smc/smc_close.c | 14 ++++++--------
net/smc/smc_core.c | 8 +++++---
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 3dc109f5db56..babe05d385e7 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -89,7 +89,7 @@ static int smc_close_abort(struct smc_connection *conn)
}
/* terminate smc socket abnormally - active abort
- * RDMA communication no longer possible
+ * link group is terminated, i.e. RDMA communication no longer possible
*/
static void smc_close_active_abort(struct smc_sock *smc)
{
@@ -113,7 +113,6 @@ static void smc_close_active_abort(struct smc_sock *smc)
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
- txflags->peer_conn_abort = 1;
sock_release(smc->clcsock);
if (!smc_cdc_rxed_any_close(&smc->conn))
sk->sk_state = SMC_PEERABORTWAIT;
@@ -127,7 +126,6 @@ static void smc_close_active_abort(struct smc_sock *smc)
case SMC_PEERCLOSEWAIT2:
if (!txflags->peer_conn_closed) {
sk->sk_state = SMC_PEERABORTWAIT;
- txflags->peer_conn_abort = 1;
sock_release(smc->clcsock);
} else {
sk->sk_state = SMC_CLOSED;
@@ -135,10 +133,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
break;
case SMC_PROCESSABORT:
case SMC_APPFINCLOSEWAIT:
- if (!txflags->peer_conn_closed) {
- txflags->peer_conn_abort = 1;
+ if (!txflags->peer_conn_closed)
sock_release(smc->clcsock);
- }
sk->sk_state = SMC_CLOSED;
break;
case SMC_PEERFINCLOSEWAIT:
@@ -303,8 +299,9 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
}
}
-/* Some kind of closing has been received: peer_conn_closed, peer_conn_abort,
- * or peer_done_writing.
+/* Either some kind of closing has been received: peer_conn_closed,
+ * peer_conn_abort, or peer_done_writing
+ * or the link group of the connection terminates abnormally.
*/
static void smc_close_passive_work(struct work_struct *work)
{
@@ -327,6 +324,7 @@ static void smc_close_passive_work(struct work_struct *work)
rxflags = &conn->local_rx_ctrl.conn_state_flags;
if (rxflags->peer_conn_abort) {
+ /* peer has not received all data */
smc_close_passive_abort_received(smc);
release_sock(&smc->sk);
cancel_delayed_work_sync(&conn->tx_work);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 7406cbb41856..ed5b46d1fe41 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -234,9 +234,7 @@ static void smc_buf_unuse(struct smc_connection *conn)
/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
- struct smc_link_group *lgr = conn->lgr;
-
- if (!lgr)
+ if (!conn->lgr)
return;
smc_cdc_tx_dismiss_slots(conn);
smc_lgr_unregister_conn(conn);
@@ -331,12 +329,16 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
conn = rb_entry(node, struct smc_connection, alert_node);
smc = container_of(conn, struct smc_sock, conn);
sock_hold(&smc->sk);
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
__smc_lgr_unregister_conn(conn);
+ write_unlock_bh(&lgr->conns_lock);
schedule_work(&conn->close_work);
+ write_lock_bh(&lgr->conns_lock);
sock_put(&smc->sk);
node = rb_first(&lgr->conns_all);
}
write_unlock_bh(&lgr->conns_lock);
+ wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
}
/* Determine vlan of internal TCP socket.
--
2.13.5
* [PATCH net-next 6/6] net/smc: check for healthy link group resp. connections
From: Ursula Braun @ 2018-01-25 10:15 UTC
To: davem; +Cc: netdev, linux-s390, jwi, schwidefsky, heiko.carstens, raspl, ubraun
If a problem is detected for at least one connection of a link group,
the whole link group and all its connections are terminated.
This patch adds a check for a healthy link group when trying to
reserve a work request, and checks for healthy connections before
starting a tx worker.
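Both checks piggyback on teardown side effects rather than a new flag: the
diff below treats a cleared alert_token_local as "connection terminated"
and an unlinked lgr->list as "link group terminated" (compare patch 4).
A sketch of the resulting guards:

    /* sketch of the health checks added below */
    if (!conn->alert_token_local) /* abnormal connection termination */
        return -EPIPE;

    if (list_empty(&lgr->list))   /* link group already terminated */
        return -EPIPE;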
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
---
net/smc/smc_cdc.c | 9 +++++++--
net/smc/smc_diag.c | 6 ++++--
net/smc/smc_tx.c | 15 ++++++++++++---
net/smc/smc_wr.c | 11 ++++++-----
4 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 51805334e001..6e8f5fbe0f09 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -65,9 +65,14 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
struct smc_cdc_tx_pend **pend)
{
struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+ int rc;
- return smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
- (struct smc_wr_tx_pend_priv **)pend);
+ rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
+ (struct smc_wr_tx_pend_priv **)pend);
+ if (!conn->alert_token_local)
+ /* abnormal termination */
+ rc = -EPIPE;
+ return rc;
}
static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index d2d01cf70224..427b91c1c964 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -86,7 +86,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
goto errout;
- if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.lgr) {
+ if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
+ smc->conn.alert_token_local) {
struct smc_connection *conn = &smc->conn;
struct smc_diag_conninfo cinfo = {
.token = conn->alert_token_local,
@@ -124,7 +125,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
goto errout;
}
- if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr) {
+ if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr &&
+ !list_empty(&smc->conn.lgr->list)) {
struct smc_diag_lgrinfo linfo = {
.role = smc->conn.lgr->role,
.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 71b7d9f079f0..838bce20c361 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -408,8 +408,9 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
goto out_unlock;
}
rc = 0;
- schedule_delayed_work(&conn->tx_work,
- SMC_TX_WORK_DELAY);
+ if (conn->alert_token_local) /* connection healthy */
+ schedule_delayed_work(&conn->tx_work,
+ SMC_TX_WORK_DELAY);
}
goto out_unlock;
}
@@ -440,10 +441,17 @@ static void smc_tx_work(struct work_struct *work)
int rc;
lock_sock(&smc->sk);
+ if (smc->sk.sk_err ||
+ !conn->alert_token_local ||
+ conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
+ goto out;
+
rc = smc_tx_sndbuf_nonempty(conn);
if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
!atomic_read(&conn->bytes_to_rcv))
conn->local_rx_ctrl.prod_flags.write_blocked = 0;
+
+out:
release_sock(&smc->sk);
}
@@ -464,7 +472,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
((to_confirm > conn->rmbe_update_limit) &&
((to_confirm > (conn->rmbe_size / 2)) ||
conn->local_rx_ctrl.prod_flags.write_blocked))) {
- if (smc_cdc_get_slot_and_msg_send(conn) < 0) {
+ if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
+ conn->alert_token_local) { /* connection healthy */
schedule_delayed_work(&conn->tx_work,
SMC_TX_WORK_DELAY);
return;
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 621c65850a18..1b8af23e6e2b 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -174,9 +174,9 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
struct smc_wr_tx_pend_priv **wr_pend_priv)
{
struct smc_wr_tx_pend *wr_pend;
+ u32 idx = link->wr_tx_cnt;
struct ib_send_wr *wr_ib;
u64 wr_id;
- u32 idx;
int rc;
*wr_buf = NULL;
@@ -186,16 +186,17 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
if (rc)
return rc;
} else {
+ struct smc_link_group *lgr;
+
+ lgr = container_of(link, struct smc_link_group,
+ lnk[SMC_SINGLE_LINK]);
rc = wait_event_timeout(
link->wr_tx_wait,
+ list_empty(&lgr->list) || /* lgr terminated */
(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
SMC_WR_TX_WAIT_FREE_SLOT_TIME);
if (!rc) {
/* timeout - terminate connections */
- struct smc_link_group *lgr;
-
- lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
smc_lgr_terminate(lgr);
return -EPIPE;
}
--
2.13.5
* Re: [PATCH net-next 0/6] net/smc: more socket closing improvements
From: David Miller @ 2018-01-25 21:11 UTC
To: ubraun; +Cc: netdev, linux-s390, jwi, schwidefsky, heiko.carstens, raspl
From: Ursula Braun <ubraun@linux.vnet.ibm.com>
Date: Thu, 25 Jan 2018 11:15:30 +0100
> these patches improve the smc behavior for abnormal socket closing.
Series applied, thank you.