netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH net-next] tcp: remove one indentation level in tcp_rcv_state_process()
@ 2013-04-19  3:16 Eric Dumazet
  2013-04-19 17:04 ` Eric Dumazet
  2013-05-23 15:55 ` [PATCH v2 " Eric Dumazet
  0 siblings, 2 replies; 14+ messages in thread
From: Eric Dumazet @ 2013-04-19  3:16 UTC (permalink / raw)
  To: David Miller; +Cc: netdev, Yuchung Cheng

From: Eric Dumazet <edumazet@google.com>

Remove one level of indentation 'introduced' in commit
c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)

@acceptable variable is a boolean.

This patch is a pure cleanup.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
---
 net/ipv4/tcp_input.c |  267 ++++++++++++++++++++---------------------
 1 file changed, 132 insertions(+), 135 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6d9ca35..08f58c1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5595,6 +5595,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *req;
 	int queued = 0;
+	bool acceptable;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5665,156 +5666,152 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	/* step 5: check the ACK field */
-	if (true) {
-		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
-
-		switch (sk->sk_state) {
-		case TCP_SYN_RECV:
-			if (acceptable) {
-				/* Once we leave TCP_SYN_RECV, we no longer
-				 * need req so release it.
-				 */
-				if (req) {
-					tcp_synack_rtt_meas(sk, req);
-					tp->total_retrans = req->num_retrans;
-
-					reqsk_fastopen_remove(sk, req, false);
-				} else {
-					/* Make sure socket is routed, for
-					 * correct metrics.
-					 */
-					icsk->icsk_af_ops->rebuild_header(sk);
-					tcp_init_congestion_control(sk);
+	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
-					tcp_mtup_init(sk);
-					tcp_init_buffer_space(sk);
-					tp->copied_seq = tp->rcv_nxt;
-				}
-				smp_mb();
-				tcp_set_state(sk, TCP_ESTABLISHED);
-				sk->sk_state_change(sk);
+	switch (sk->sk_state) {
+	case TCP_SYN_RECV:
+		if (acceptable) {
+			/* Once we leave TCP_SYN_RECV, we no longer
+			 * need req so release it.
+			 */
+			if (req) {
+				tcp_synack_rtt_meas(sk, req);
+				tp->total_retrans = req->num_retrans;
 
-				/* Note, that this wakeup is only for marginal
-				 * crossed SYN case. Passively open sockets
-				 * are not waked up, because sk->sk_sleep ==
-				 * NULL and sk->sk_socket == NULL.
+				reqsk_fastopen_remove(sk, req, false);
+			} else {
+				/* Make sure socket is routed, for
+				 * correct metrics.
 				 */
-				if (sk->sk_socket)
-					sk_wake_async(sk,
-						      SOCK_WAKE_IO, POLL_OUT);
-
-				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-				tp->snd_wnd = ntohs(th->window) <<
-					      tp->rx_opt.snd_wscale;
-				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-				if (tp->rx_opt.tstamp_ok)
-					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-				if (req) {
-					/* Re-arm the timer because data may
-					 * have been sent out. This is similar
-					 * to the regular data transmission case
-					 * when new data has just been ack'ed.
-					 *
-					 * (TFO) - we could try to be more
-					 * aggressive and retranmitting any data
-					 * sooner based on when they were sent
-					 * out.
-					 */
-					tcp_rearm_rto(sk);
-				} else
-					tcp_init_metrics(sk);
+				icsk->icsk_af_ops->rebuild_header(sk);
+				tcp_init_congestion_control(sk);
 
-				/* Prevent spurious tcp_cwnd_restart() on
-				 * first data packet.
+				tcp_mtup_init(sk);
+				tcp_init_buffer_space(sk);
+				tp->copied_seq = tp->rcv_nxt;
+			}
+			smp_mb();
+			tcp_set_state(sk, TCP_ESTABLISHED);
+			sk->sk_state_change(sk);
+
+			/* Note, that this wakeup is only for marginal
+			 * crossed SYN case. Passively open sockets
+			 * are not waked up, because sk->sk_sleep ==
+			 * NULL and sk->sk_socket == NULL.
+			 */
+			if (sk->sk_socket)
+				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+			tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+			tp->snd_wnd = ntohs(th->window) <<
+				      tp->rx_opt.snd_wscale;
+			tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+			if (tp->rx_opt.tstamp_ok)
+				tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+			if (req) {
+				/* Re-arm the timer because data may
+				 * have been sent out. This is similar
+				 * to the regular data transmission case
+				 * when new data has just been ack'ed.
+				 *
+				 * (TFO) - we could try to be more
+				 * aggressive and retranmitting any data
+				 * sooner based on when they were sent out.
 				 */
-				tp->lsndtime = tcp_time_stamp;
+				tcp_rearm_rto(sk);
+			} else
+				tcp_init_metrics(sk);
 
-				tcp_initialize_rcv_mss(sk);
-				tcp_fast_path_on(tp);
-			} else {
-				return 1;
-			}
-			break;
+			/* Prevent spurious tcp_cwnd_restart() on
+			 * first data packet.
+			 */
+			tp->lsndtime = tcp_time_stamp;
 
-		case TCP_FIN_WAIT1:
-			/* If we enter the TCP_FIN_WAIT1 state and we are a
-			 * Fast Open socket and this is the first acceptable
-			 * ACK we have received, this would have acknowledged
-			 * our SYNACK so stop the SYNACK timer.
+			tcp_initialize_rcv_mss(sk);
+			tcp_fast_path_on(tp);
+		} else {
+			return 1;
+		}
+		break;
+
+	case TCP_FIN_WAIT1:
+		/* If we enter the TCP_FIN_WAIT1 state and we are a
+		 * Fast Open socket and this is the first acceptable
+		 * ACK we have received, this would have acknowledged
+		 * our SYNACK so stop the SYNACK timer.
+		 */
+		if (req != NULL) {
+			/* Return RST if ack_seq is invalid.
+			 * Note that RFC793 only says to generate a
+			 * DUPACK for it but for TCP Fast Open it seems
+			 * better to treat this case like TCP_SYN_RECV
+			 * above.
 			 */
-			if (req != NULL) {
-				/* Return RST if ack_seq is invalid.
-				 * Note that RFC793 only says to generate a
-				 * DUPACK for it but for TCP Fast Open it seems
-				 * better to treat this case like TCP_SYN_RECV
-				 * above.
-				 */
-				if (!acceptable)
+			if (!acceptable)
+				return 1;
+			/* We no longer need the request sock. */
+			reqsk_fastopen_remove(sk, req, false);
+			tcp_rearm_rto(sk);
+		}
+		if (tp->snd_una == tp->write_seq) {
+			struct dst_entry *dst;
+
+			tcp_set_state(sk, TCP_FIN_WAIT2);
+			sk->sk_shutdown |= SEND_SHUTDOWN;
+
+			dst = __sk_dst_get(sk);
+			if (dst)
+				dst_confirm(dst);
+
+			if (!sock_flag(sk, SOCK_DEAD))
+				/* Wake up lingering close() */
+				sk->sk_state_change(sk);
+			else {
+				int tmo;
+
+				if (tp->linger2 < 0 ||
+				    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+				     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+					tcp_done(sk);
+					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 					return 1;
-				/* We no longer need the request sock. */
-				reqsk_fastopen_remove(sk, req, false);
-				tcp_rearm_rto(sk);
-			}
-			if (tp->snd_una == tp->write_seq) {
-				struct dst_entry *dst;
-
-				tcp_set_state(sk, TCP_FIN_WAIT2);
-				sk->sk_shutdown |= SEND_SHUTDOWN;
-
-				dst = __sk_dst_get(sk);
-				if (dst)
-					dst_confirm(dst);
-
-				if (!sock_flag(sk, SOCK_DEAD))
-					/* Wake up lingering close() */
-					sk->sk_state_change(sk);
-				else {
-					int tmo;
-
-					if (tp->linger2 < 0 ||
-					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
-					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
-						tcp_done(sk);
-						NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
-						return 1;
-					}
+				}
 
-					tmo = tcp_fin_time(sk);
-					if (tmo > TCP_TIMEWAIT_LEN) {
-						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-					} else if (th->fin || sock_owned_by_user(sk)) {
-						/* Bad case. We could lose such FIN otherwise.
-						 * It is not a big problem, but it looks confusing
-						 * and not so rare event. We still can lose it now,
-						 * if it spins in bh_lock_sock(), but it is really
-						 * marginal case.
-						 */
-						inet_csk_reset_keepalive_timer(sk, tmo);
-					} else {
-						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-						goto discard;
-					}
+				tmo = tcp_fin_time(sk);
+				if (tmo > TCP_TIMEWAIT_LEN) {
+					inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+				} else if (th->fin || sock_owned_by_user(sk)) {
+					/* Bad case. We could lose such FIN otherwise.
+					 * It is not a big problem, but it looks confusing
+					 * and not so rare event. We still can lose it now,
+					 * if it spins in bh_lock_sock(), but it is really
+					 * marginal case.
+					 */
+					inet_csk_reset_keepalive_timer(sk, tmo);
+				} else {
+					tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+					goto discard;
 				}
 			}
-			break;
+		}
+		break;
 
-		case TCP_CLOSING:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-				goto discard;
-			}
-			break;
+	case TCP_CLOSING:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+			goto discard;
+		}
+		break;
 
-		case TCP_LAST_ACK:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_update_metrics(sk);
-				tcp_done(sk);
-				goto discard;
-			}
-			break;
+	case TCP_LAST_ACK:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_update_metrics(sk);
+			tcp_done(sk);
+			goto discard;
 		}
+		break;
 	}
 
 	/* ts_recent update must be made after we are sure that the packet

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH net-next] tcp: remove one indentation level in tcp_rcv_state_process()
  2013-04-19  3:16 [PATCH net-next] tcp: remove one indentation level in tcp_rcv_state_process() Eric Dumazet
@ 2013-04-19 17:04 ` Eric Dumazet
  2013-04-19 18:22   ` David Miller
  2013-05-23 15:55 ` [PATCH v2 " Eric Dumazet
  1 sibling, 1 reply; 14+ messages in thread
From: Eric Dumazet @ 2013-04-19 17:04 UTC (permalink / raw)
  To: David Miller; +Cc: netdev, Yuchung Cheng, Neal Cardwell

On Thu, 2013-04-18 at 20:16 -0700, Eric Dumazet wrote:
> From: Eric Dumazet <edumazet@google.com>
> 
> Remove one level of indentation 'introduced' in commit
> c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)
> 
> @acceptable variable is a boolean.
> 
> This patch is a pure cleanup.
> 
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: Yuchung Cheng <ycheng@google.com>
> ---

Please drop this patch, I'll respin it later, when the "call
tcp_replace_ts_recent() from tcp_ack()" issues are sorted out.

Thanks

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH net-next] tcp: remove one indentation level in tcp_rcv_state_process()
  2013-04-19 17:04 ` Eric Dumazet
@ 2013-04-19 18:22   ` David Miller
  0 siblings, 0 replies; 14+ messages in thread
From: David Miller @ 2013-04-19 18:22 UTC (permalink / raw)
  To: eric.dumazet; +Cc: netdev, ycheng, ncardwell

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Fri, 19 Apr 2013 10:04:48 -0700

> On Thu, 2013-04-18 at 20:16 -0700, Eric Dumazet wrote:
>> From: Eric Dumazet <edumazet@google.com>
>> 
>> Remove one level of indentation 'introduced' in commit
>> c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)
>> 
>> @acceptable variable is a boolean.
>> 
>> This patch is a pure cleanup.
>> 
>> Signed-off-by: Eric Dumazet <edumazet@google.com>
>> Cc: Yuchung Cheng <ycheng@google.com>
>> ---
> 
> Please drop this patch, I'll respin it later, when the "call
> tcp_replace_ts_recent() from tcp_ack()" issues are sorted out.

Done.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v2 net-next] tcp: remove one indentation level in tcp_rcv_state_process()
  2013-04-19  3:16 [PATCH net-next] tcp: remove one indentation level in tcp_rcv_state_process() Eric Dumazet
  2013-04-19 17:04 ` Eric Dumazet
@ 2013-05-23 15:55 ` Eric Dumazet
  2013-05-23 17:09   ` Joe Perches
  2013-05-25  1:03   ` [PATCH v3 " Eric Dumazet
  1 sibling, 2 replies; 14+ messages in thread
From: Eric Dumazet @ 2013-05-23 15:55 UTC (permalink / raw)
  To: David Miller; +Cc: netdev, Yuchung Cheng

From: Eric Dumazet <edumazet@google.com>

Remove one level of indentation 'introduced' in commit
c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)

if (true) {
	...
}

@acceptable variable is a boolean.

This patch is a pure cleanup.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
---
respin of v1 sent April 18th and postponed because of a fix we
had to do first (tcp: call tcp_replace_ts_recent() from tcp_ack())

 net/ipv4/tcp_input.c |  270 ++++++++++++++++++++---------------------
 1 file changed, 134 insertions(+), 136 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d7d3694..162f40f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5533,6 +5533,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *req;
 	int queued = 0;
+	bool acceptable;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5603,157 +5604,154 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	/* step 5: check the ACK field */
-	if (true) {
-		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
-						  FLAG_UPDATE_TS_RECENT) > 0;
-
-		switch (sk->sk_state) {
-		case TCP_SYN_RECV:
-			if (acceptable) {
-				/* Once we leave TCP_SYN_RECV, we no longer
-				 * need req so release it.
-				 */
-				if (req) {
-					tcp_synack_rtt_meas(sk, req);
-					tp->total_retrans = req->num_retrans;
-
-					reqsk_fastopen_remove(sk, req, false);
-				} else {
-					/* Make sure socket is routed, for
-					 * correct metrics.
-					 */
-					icsk->icsk_af_ops->rebuild_header(sk);
-					tcp_init_congestion_control(sk);
+	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+				      FLAG_UPDATE_TS_RECENT) > 0;
 
-					tcp_mtup_init(sk);
-					tcp_init_buffer_space(sk);
-					tp->copied_seq = tp->rcv_nxt;
-				}
-				smp_mb();
-				tcp_set_state(sk, TCP_ESTABLISHED);
-				sk->sk_state_change(sk);
+	switch (sk->sk_state) {
+	case TCP_SYN_RECV:
+		if (acceptable) {
+			/* Once we leave TCP_SYN_RECV, we no longer
+			 * need req so release it.
+			 */
+			if (req) {
+				tcp_synack_rtt_meas(sk, req);
+				tp->total_retrans = req->num_retrans;
 
-				/* Note, that this wakeup is only for marginal
-				 * crossed SYN case. Passively open sockets
-				 * are not waked up, because sk->sk_sleep ==
-				 * NULL and sk->sk_socket == NULL.
+				reqsk_fastopen_remove(sk, req, false);
+			} else {
+				/* Make sure socket is routed, for
+				 * correct metrics.
 				 */
-				if (sk->sk_socket)
-					sk_wake_async(sk,
-						      SOCK_WAKE_IO, POLL_OUT);
-
-				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-				tp->snd_wnd = ntohs(th->window) <<
-					      tp->rx_opt.snd_wscale;
-				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-				if (tp->rx_opt.tstamp_ok)
-					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-				if (req) {
-					/* Re-arm the timer because data may
-					 * have been sent out. This is similar
-					 * to the regular data transmission case
-					 * when new data has just been ack'ed.
-					 *
-					 * (TFO) - we could try to be more
-					 * aggressive and retranmitting any data
-					 * sooner based on when they were sent
-					 * out.
-					 */
-					tcp_rearm_rto(sk);
-				} else
-					tcp_init_metrics(sk);
+				icsk->icsk_af_ops->rebuild_header(sk);
+				tcp_init_congestion_control(sk);
 
-				/* Prevent spurious tcp_cwnd_restart() on
-				 * first data packet.
+				tcp_mtup_init(sk);
+				tcp_init_buffer_space(sk);
+				tp->copied_seq = tp->rcv_nxt;
+			}
+			smp_mb();
+			tcp_set_state(sk, TCP_ESTABLISHED);
+			sk->sk_state_change(sk);
+
+			/* Note, that this wakeup is only for marginal
+			 * crossed SYN case. Passively open sockets
+			 * are not waked up, because sk->sk_sleep ==
+			 * NULL and sk->sk_socket == NULL.
+			 */
+			if (sk->sk_socket)
+				sk_wake_async(sk,
+					      SOCK_WAKE_IO, POLL_OUT);
+
+			tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+			tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
+			tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+			if (tp->rx_opt.tstamp_ok)
+				tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+			if (req) {
+				/* Re-arm the timer because data may
+				 * have been sent out. This is similar
+				 * to the regular data transmission case
+				 * when new data has just been ack'ed.
+				 *
+				 * (TFO) - we could try to be more
+				 * aggressive and retranmitting any data
+				 * sooner based on when they were sent
+				 * out.
 				 */
-				tp->lsndtime = tcp_time_stamp;
+				tcp_rearm_rto(sk);
+			} else
+				tcp_init_metrics(sk);
 
-				tcp_initialize_rcv_mss(sk);
-				tcp_fast_path_on(tp);
-			} else {
-				return 1;
-			}
-			break;
+			/* Prevent spurious tcp_cwnd_restart() on
+			 * first data packet.
+			 */
+			tp->lsndtime = tcp_time_stamp;
 
-		case TCP_FIN_WAIT1:
-			/* If we enter the TCP_FIN_WAIT1 state and we are a
-			 * Fast Open socket and this is the first acceptable
-			 * ACK we have received, this would have acknowledged
-			 * our SYNACK so stop the SYNACK timer.
+			tcp_initialize_rcv_mss(sk);
+			tcp_fast_path_on(tp);
+		} else {
+			return 1;
+		}
+		break;
+
+	case TCP_FIN_WAIT1:
+		/* If we enter the TCP_FIN_WAIT1 state and we are a
+		 * Fast Open socket and this is the first acceptable
+		 * ACK we have received, this would have acknowledged
+		 * our SYNACK so stop the SYNACK timer.
+		 */
+		if (req != NULL) {
+			/* Return RST if ack_seq is invalid.
+			 * Note that RFC793 only says to generate a
+			 * DUPACK for it but for TCP Fast Open it seems
+			 * better to treat this case like TCP_SYN_RECV
+			 * above.
 			 */
-			if (req != NULL) {
-				/* Return RST if ack_seq is invalid.
-				 * Note that RFC793 only says to generate a
-				 * DUPACK for it but for TCP Fast Open it seems
-				 * better to treat this case like TCP_SYN_RECV
-				 * above.
-				 */
-				if (!acceptable)
+			if (!acceptable)
+				return 1;
+			/* We no longer need the request sock. */
+			reqsk_fastopen_remove(sk, req, false);
+			tcp_rearm_rto(sk);
+		}
+		if (tp->snd_una == tp->write_seq) {
+			struct dst_entry *dst;
+
+			tcp_set_state(sk, TCP_FIN_WAIT2);
+			sk->sk_shutdown |= SEND_SHUTDOWN;
+
+			dst = __sk_dst_get(sk);
+			if (dst)
+				dst_confirm(dst);
+
+			if (!sock_flag(sk, SOCK_DEAD)) {
+				/* Wake up lingering close() */
+				sk->sk_state_change(sk);
+			} else {
+				int tmo;
+
+				if (tp->linger2 < 0 ||
+				    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+				     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+					tcp_done(sk);
+					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 					return 1;
-				/* We no longer need the request sock. */
-				reqsk_fastopen_remove(sk, req, false);
-				tcp_rearm_rto(sk);
-			}
-			if (tp->snd_una == tp->write_seq) {
-				struct dst_entry *dst;
-
-				tcp_set_state(sk, TCP_FIN_WAIT2);
-				sk->sk_shutdown |= SEND_SHUTDOWN;
-
-				dst = __sk_dst_get(sk);
-				if (dst)
-					dst_confirm(dst);
-
-				if (!sock_flag(sk, SOCK_DEAD))
-					/* Wake up lingering close() */
-					sk->sk_state_change(sk);
-				else {
-					int tmo;
-
-					if (tp->linger2 < 0 ||
-					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
-					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
-						tcp_done(sk);
-						NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
-						return 1;
-					}
+				}
 
-					tmo = tcp_fin_time(sk);
-					if (tmo > TCP_TIMEWAIT_LEN) {
-						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-					} else if (th->fin || sock_owned_by_user(sk)) {
-						/* Bad case. We could lose such FIN otherwise.
-						 * It is not a big problem, but it looks confusing
-						 * and not so rare event. We still can lose it now,
-						 * if it spins in bh_lock_sock(), but it is really
-						 * marginal case.
-						 */
-						inet_csk_reset_keepalive_timer(sk, tmo);
-					} else {
-						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-						goto discard;
-					}
+				tmo = tcp_fin_time(sk);
+				if (tmo > TCP_TIMEWAIT_LEN) {
+					inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+				} else if (th->fin || sock_owned_by_user(sk)) {
+					/* Bad case. We could lose such FIN otherwise.
+					 * It is not a big problem, but it looks confusing
+					 * and not so rare event. We still can lose it now,
+					 * if it spins in bh_lock_sock(), but it is really
+					 * marginal case.
+					 */
+					inet_csk_reset_keepalive_timer(sk, tmo);
+				} else {
+					tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+					goto discard;
 				}
 			}
-			break;
+		}
+		break;
 
-		case TCP_CLOSING:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-				goto discard;
-			}
-			break;
+	case TCP_CLOSING:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+			goto discard;
+		}
+		break;
 
-		case TCP_LAST_ACK:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_update_metrics(sk);
-				tcp_done(sk);
-				goto discard;
-			}
-			break;
+	case TCP_LAST_ACK:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_update_metrics(sk);
+			tcp_done(sk);
+			goto discard;
 		}
+		break;
 	}
 
 	/* step 6: check the URG bit */

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH v2 net-next] tcp: remove one indentation level in tcp_rcv_state_process()
  2013-05-23 15:55 ` [PATCH v2 " Eric Dumazet
@ 2013-05-23 17:09   ` Joe Perches
  2013-05-23 17:12     ` Eric Dumazet
  2013-05-25  1:03   ` [PATCH v3 " Eric Dumazet
  1 sibling, 1 reply; 14+ messages in thread
From: Joe Perches @ 2013-05-23 17:09 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David Miller, netdev, Yuchung Cheng

On Thu, 2013-05-23 at 08:55 -0700, Eric Dumazet wrote:
> Remove one level of indentation 'introduced' in commit
> c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)
[]
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
[]
> -				if (sk->sk_socket)
> -					sk_wake_async(sk,
> -						      SOCK_WAKE_IO, POLL_OUT);
[]
> +			if (sk->sk_socket)
> +				sk_wake_async(sk,
> +					      SOCK_WAKE_IO, POLL_OUT);

It'd also be nice to reflow the lines to 80 cols.

				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v2 net-next] tcp: remove one indentation level in tcp_rcv_state_process()
  2013-05-23 17:09   ` Joe Perches
@ 2013-05-23 17:12     ` Eric Dumazet
  0 siblings, 0 replies; 14+ messages in thread
From: Eric Dumazet @ 2013-05-23 17:12 UTC (permalink / raw)
  To: Joe Perches; +Cc: David Miller, netdev, Yuchung Cheng

On Thu, 2013-05-23 at 10:09 -0700, Joe Perches wrote:
> On Thu, 2013-05-23 at 08:55 -0700, Eric Dumazet wrote:
> > Remove one level of indentation 'introduced' in commit
> > c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)
> []
> > diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> []
> > -				if (sk->sk_socket)
> > -					sk_wake_async(sk,
> > -						      SOCK_WAKE_IO, POLL_OUT);
> []
> > +			if (sk->sk_socket)
> > +				sk_wake_async(sk,
> > +					      SOCK_WAKE_IO, POLL_OUT);
> 
> It'd also be nice to reflow the lines to 80 cols.
> 
> 				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
> 

ok will do.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v3 net-next] tcp: remove one indentation level in tcp_rcv_state_process()
  2013-05-23 15:55 ` [PATCH v2 " Eric Dumazet
  2013-05-23 17:09   ` Joe Perches
@ 2013-05-25  1:03   ` Eric Dumazet
  2013-05-25  4:02     ` [PATCH net-next 1/2] tcp: Remove another indentation level in tcp_rcv_state_process Joe Perches
  2013-05-26  6:23     ` [PATCH v3 net-next] tcp: remove one indentation level in tcp_rcv_state_process() David Miller
  1 sibling, 2 replies; 14+ messages in thread
From: Eric Dumazet @ 2013-05-25  1:03 UTC (permalink / raw)
  To: David Miller; +Cc: netdev, Yuchung Cheng

From: Eric Dumazet <edumazet@google.com>

Remove one level of indentation 'introduced' in commit
c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)

if (true) {
        ...
}

@acceptable variable is a boolean.

This patch is a pure cleanup.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
---
v3 : one sk_wake_async() reformatted (Joe Perches)
v2 : respin of v1 sent April 18th and postponed because of a fix we
had to do first (tcp: call tcp_replace_ts_recent() from tcp_ack())

 net/ipv4/tcp_input.c |  269 ++++++++++++++++++++---------------------
 1 file changed, 133 insertions(+), 136 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8230cd6..4061425 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5536,6 +5536,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *req;
 	int queued = 0;
+	bool acceptable;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5606,157 +5607,153 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	/* step 5: check the ACK field */
-	if (true) {
-		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
-						  FLAG_UPDATE_TS_RECENT) > 0;
-
-		switch (sk->sk_state) {
-		case TCP_SYN_RECV:
-			if (acceptable) {
-				/* Once we leave TCP_SYN_RECV, we no longer
-				 * need req so release it.
-				 */
-				if (req) {
-					tcp_synack_rtt_meas(sk, req);
-					tp->total_retrans = req->num_retrans;
-
-					reqsk_fastopen_remove(sk, req, false);
-				} else {
-					/* Make sure socket is routed, for
-					 * correct metrics.
-					 */
-					icsk->icsk_af_ops->rebuild_header(sk);
-					tcp_init_congestion_control(sk);
+	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+				      FLAG_UPDATE_TS_RECENT) > 0;
 
-					tcp_mtup_init(sk);
-					tcp_init_buffer_space(sk);
-					tp->copied_seq = tp->rcv_nxt;
-				}
-				smp_mb();
-				tcp_set_state(sk, TCP_ESTABLISHED);
-				sk->sk_state_change(sk);
+	switch (sk->sk_state) {
+	case TCP_SYN_RECV:
+		if (acceptable) {
+			/* Once we leave TCP_SYN_RECV, we no longer
+			 * need req so release it.
+			 */
+			if (req) {
+				tcp_synack_rtt_meas(sk, req);
+				tp->total_retrans = req->num_retrans;
 
-				/* Note, that this wakeup is only for marginal
-				 * crossed SYN case. Passively open sockets
-				 * are not waked up, because sk->sk_sleep ==
-				 * NULL and sk->sk_socket == NULL.
+				reqsk_fastopen_remove(sk, req, false);
+			} else {
+				/* Make sure socket is routed, for
+				 * correct metrics.
 				 */
-				if (sk->sk_socket)
-					sk_wake_async(sk,
-						      SOCK_WAKE_IO, POLL_OUT);
-
-				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-				tp->snd_wnd = ntohs(th->window) <<
-					      tp->rx_opt.snd_wscale;
-				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-				if (tp->rx_opt.tstamp_ok)
-					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-				if (req) {
-					/* Re-arm the timer because data may
-					 * have been sent out. This is similar
-					 * to the regular data transmission case
-					 * when new data has just been ack'ed.
-					 *
-					 * (TFO) - we could try to be more
-					 * aggressive and retranmitting any data
-					 * sooner based on when they were sent
-					 * out.
-					 */
-					tcp_rearm_rto(sk);
-				} else
-					tcp_init_metrics(sk);
+				icsk->icsk_af_ops->rebuild_header(sk);
+				tcp_init_congestion_control(sk);
 
-				/* Prevent spurious tcp_cwnd_restart() on
-				 * first data packet.
+				tcp_mtup_init(sk);
+				tcp_init_buffer_space(sk);
+				tp->copied_seq = tp->rcv_nxt;
+			}
+			smp_mb();
+			tcp_set_state(sk, TCP_ESTABLISHED);
+			sk->sk_state_change(sk);
+
+			/* Note, that this wakeup is only for marginal
+			 * crossed SYN case. Passively open sockets
+			 * are not waked up, because sk->sk_sleep ==
+			 * NULL and sk->sk_socket == NULL.
+			 */
+			if (sk->sk_socket)
+				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+			tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+			tp->snd_wnd = ntohs(th->window) <<
+					tp->rx_opt.snd_wscale;
+			tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+			if (tp->rx_opt.tstamp_ok)
+				tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+			if (req) {
+				/* Re-arm the timer because data may
+				 * have been sent out. This is similar
+				 * to the regular data transmission case
+				 * when new data has just been ack'ed.
+				 *
+				 * (TFO) - we could try to be more aggressive
+				 * and retransmitting any data sooner based
+				 * on when they are sent out.
 				 */
-				tp->lsndtime = tcp_time_stamp;
+				tcp_rearm_rto(sk);
+			} else
+				tcp_init_metrics(sk);
 
-				tcp_initialize_rcv_mss(sk);
-				tcp_fast_path_on(tp);
-			} else {
-				return 1;
-			}
-			break;
+			/* Prevent spurious tcp_cwnd_restart() on
+			 * first data packet.
+			 */
+			tp->lsndtime = tcp_time_stamp;
 
-		case TCP_FIN_WAIT1:
-			/* If we enter the TCP_FIN_WAIT1 state and we are a
-			 * Fast Open socket and this is the first acceptable
-			 * ACK we have received, this would have acknowledged
-			 * our SYNACK so stop the SYNACK timer.
+			tcp_initialize_rcv_mss(sk);
+			tcp_fast_path_on(tp);
+		} else {
+			return 1;
+		}
+		break;
+
+	case TCP_FIN_WAIT1:
+		/* If we enter the TCP_FIN_WAIT1 state and we are a
+		 * Fast Open socket and this is the first acceptable
+		 * ACK we have received, this would have acknowledged
+		 * our SYNACK so stop the SYNACK timer.
+		 */
+		if (req != NULL) {
+			/* Return RST if ack_seq is invalid.
+			 * Note that RFC793 only says to generate a
+			 * DUPACK for it but for TCP Fast Open it seems
+			 * better to treat this case like TCP_SYN_RECV
+			 * above.
 			 */
-			if (req != NULL) {
-				/* Return RST if ack_seq is invalid.
-				 * Note that RFC793 only says to generate a
-				 * DUPACK for it but for TCP Fast Open it seems
-				 * better to treat this case like TCP_SYN_RECV
-				 * above.
-				 */
-				if (!acceptable)
+			if (!acceptable)
+				return 1;
+			/* We no longer need the request sock. */
+			reqsk_fastopen_remove(sk, req, false);
+			tcp_rearm_rto(sk);
+		}
+		if (tp->snd_una == tp->write_seq) {
+			struct dst_entry *dst;
+
+			tcp_set_state(sk, TCP_FIN_WAIT2);
+			sk->sk_shutdown |= SEND_SHUTDOWN;
+
+			dst = __sk_dst_get(sk);
+			if (dst)
+				dst_confirm(dst);
+
+			if (!sock_flag(sk, SOCK_DEAD)) {
+				/* Wake up lingering close() */
+				sk->sk_state_change(sk);
+			} else {
+				int tmo;
+
+				if (tp->linger2 < 0 ||
+				    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+				     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+					tcp_done(sk);
+					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 					return 1;
-				/* We no longer need the request sock. */
-				reqsk_fastopen_remove(sk, req, false);
-				tcp_rearm_rto(sk);
-			}
-			if (tp->snd_una == tp->write_seq) {
-				struct dst_entry *dst;
-
-				tcp_set_state(sk, TCP_FIN_WAIT2);
-				sk->sk_shutdown |= SEND_SHUTDOWN;
-
-				dst = __sk_dst_get(sk);
-				if (dst)
-					dst_confirm(dst);
-
-				if (!sock_flag(sk, SOCK_DEAD))
-					/* Wake up lingering close() */
-					sk->sk_state_change(sk);
-				else {
-					int tmo;
-
-					if (tp->linger2 < 0 ||
-					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
-					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
-						tcp_done(sk);
-						NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
-						return 1;
-					}
+				}
 
-					tmo = tcp_fin_time(sk);
-					if (tmo > TCP_TIMEWAIT_LEN) {
-						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-					} else if (th->fin || sock_owned_by_user(sk)) {
-						/* Bad case. We could lose such FIN otherwise.
-						 * It is not a big problem, but it looks confusing
-						 * and not so rare event. We still can lose it now,
-						 * if it spins in bh_lock_sock(), but it is really
-						 * marginal case.
-						 */
-						inet_csk_reset_keepalive_timer(sk, tmo);
-					} else {
-						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-						goto discard;
-					}
+				tmo = tcp_fin_time(sk);
+				if (tmo > TCP_TIMEWAIT_LEN) {
+					inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+				} else if (th->fin || sock_owned_by_user(sk)) {
+					/* Bad case. We could lose such FIN otherwise.
+					 * It is not a big problem, but it looks confusing
+					 * and not so rare event. We still can lose it now,
+					 * if it spins in bh_lock_sock(), but it is really
+					 * marginal case.
+					 */
+					inet_csk_reset_keepalive_timer(sk, tmo);
+				} else {
+					tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+					goto discard;
 				}
 			}
-			break;
+		}
+		break;
 
-		case TCP_CLOSING:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-				goto discard;
-			}
-			break;
+	case TCP_CLOSING:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+			goto discard;
+		}
+		break;
 
-		case TCP_LAST_ACK:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_update_metrics(sk);
-				tcp_done(sk);
-				goto discard;
-			}
-			break;
+	case TCP_LAST_ACK:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_update_metrics(sk);
+			tcp_done(sk);
+			goto discard;
 		}
+		break;
 	}
 
 	/* step 6: check the URG bit */

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH net-next 1/2] tcp: Remove another indentation level in tcp_rcv_state_process
  2013-05-25  1:03   ` [PATCH v3 " Eric Dumazet
@ 2013-05-25  4:02     ` Joe Perches
  2013-05-25  4:06       ` [PATCH net-next 2/2] tcp: Remove 2 indentation levels " Joe Perches
  2013-05-25  4:24       ` [PATCH net-next 1/2] tcp: Remove another indentation level " Eric Dumazet
  2013-05-26  6:23     ` [PATCH v3 net-next] tcp: remove one indentation level in tcp_rcv_state_process() David Miller
  1 sibling, 2 replies; 14+ messages in thread
From: Joe Perches @ 2013-05-25  4:02 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David Miller, netdev, Yuchung Cheng

case TCP_SYN_RECV: can have an indentation level removed
by converting:

	if (acceptable) {
		[...];
	} else {
		return 1;
	}

to
	if (!acceptable)
		return 1;
	[...];

Reflow comments to fit 80 columns.

A neatening only patch.

Signed-off-by: Joe Perches <joe@perches.com>
---

On top of Eric's neatening patch...

 net/ipv4/tcp_input.c | 111 ++++++++++++++++++++++++---------------------------
 1 file changed, 52 insertions(+), 59 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4061425..71bd87d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5612,70 +5612,63 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	switch (sk->sk_state) {
 	case TCP_SYN_RECV:
-		if (acceptable) {
-			/* Once we leave TCP_SYN_RECV, we no longer
-			 * need req so release it.
-			 */
-			if (req) {
-				tcp_synack_rtt_meas(sk, req);
-				tp->total_retrans = req->num_retrans;
+		if (!acceptable)
+			return 1;
 
-				reqsk_fastopen_remove(sk, req, false);
-			} else {
-				/* Make sure socket is routed, for
-				 * correct metrics.
-				 */
-				icsk->icsk_af_ops->rebuild_header(sk);
-				tcp_init_congestion_control(sk);
+		/* Once we leave TCP_SYN_RECV, we no longer need req
+		 * so release it.
+		 */
+		if (req) {
+			tcp_synack_rtt_meas(sk, req);
+			tp->total_retrans = req->num_retrans;
 
-				tcp_mtup_init(sk);
-				tcp_init_buffer_space(sk);
-				tp->copied_seq = tp->rcv_nxt;
-			}
-			smp_mb();
-			tcp_set_state(sk, TCP_ESTABLISHED);
-			sk->sk_state_change(sk);
-
-			/* Note, that this wakeup is only for marginal
-			 * crossed SYN case. Passively open sockets
-			 * are not waked up, because sk->sk_sleep ==
-			 * NULL and sk->sk_socket == NULL.
-			 */
-			if (sk->sk_socket)
-				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
-
-			tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-			tp->snd_wnd = ntohs(th->window) <<
-					tp->rx_opt.snd_wscale;
-			tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-			if (tp->rx_opt.tstamp_ok)
-				tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-			if (req) {
-				/* Re-arm the timer because data may
-				 * have been sent out. This is similar
-				 * to the regular data transmission case
-				 * when new data has just been ack'ed.
-				 *
-				 * (TFO) - we could try to be more aggressive
-				 * and retransmitting any data sooner based
-				 * on when they are sent out.
-				 */
-				tcp_rearm_rto(sk);
-			} else
-				tcp_init_metrics(sk);
+			reqsk_fastopen_remove(sk, req, false);
+		} else {
+			/* Make sure socket is routed, for correct metrics. */
+			icsk->icsk_af_ops->rebuild_header(sk);
+			tcp_init_congestion_control(sk);
+
+			tcp_mtup_init(sk);
+			tcp_init_buffer_space(sk);
+			tp->copied_seq = tp->rcv_nxt;
+		}
+		smp_mb();
+		tcp_set_state(sk, TCP_ESTABLISHED);
+		sk->sk_state_change(sk);
 
-			/* Prevent spurious tcp_cwnd_restart() on
-			 * first data packet.
+		/* Note, that this wakeup is only for marginal crossed SYN case.
+		 * Passively open sockets are not waked up, because
+		 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
+		 */
+		if (sk->sk_socket)
+			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+		tp->snd_wnd = ntohs(th->window) <<
+			tp->rx_opt.snd_wscale;
+		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+		if (tp->rx_opt.tstamp_ok)
+			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+		if (req) {
+			/* Re-arm the timer because data may have been sent out.
+			 * This is similar to the regular data transmission case
+			 * when new data has just been ack'ed.
+			 *
+			 * (TFO) - we could try to be more aggressive and
+			 * retransmitting any data sooner based on when they
+			 * are sent out.
 			 */
-			tp->lsndtime = tcp_time_stamp;
+			tcp_rearm_rto(sk);
+		} else
+			tcp_init_metrics(sk);
 
-			tcp_initialize_rcv_mss(sk);
-			tcp_fast_path_on(tp);
-		} else {
-			return 1;
-		}
+		/* Prevent spurious tcp_cwnd_restart() on first data packet */
+		tp->lsndtime = tcp_time_stamp;
+
+		tcp_initialize_rcv_mss(sk);
+		tcp_fast_path_on(tp);
 		break;
 
 	case TCP_FIN_WAIT1:
-- 
1.8.1.2.459.gbcd45b4.dirty

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH net-next 2/2] tcp: Remove 2 indentation levels in tcp_rcv_state_process
  2013-05-25  4:02     ` [PATCH net-next 1/2] tcp: Remove another indentation level in tcp_rcv_state_process Joe Perches
@ 2013-05-25  4:06       ` Joe Perches
  2013-05-26  6:23         ` David Miller
  2013-05-25  4:24       ` [PATCH net-next 1/2] tcp: Remove another indentation level " Eric Dumazet
  1 sibling, 1 reply; 14+ messages in thread
From: Joe Perches @ 2013-05-25  4:06 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David Miller, netdev, Yuchung Cheng

case TCP_FIN_WAIT1 can also be simplified by reversing tests
and adding breaks;

Add braces after case and move automatic definitions.

Signed-off-by: Joe Perches <joe@perches.com>
---
 net/ipv4/tcp_input.c | 76 +++++++++++++++++++++++++++-------------------------
 1 file changed, 39 insertions(+), 37 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 71bd87d..34da5a5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5671,7 +5671,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		tcp_fast_path_on(tp);
 		break;
 
-	case TCP_FIN_WAIT1:
+	case TCP_FIN_WAIT1: {
+		struct dst_entry *dst;
+		int tmo;
+
 		/* If we enter the TCP_FIN_WAIT1 state and we are a
 		 * Fast Open socket and this is the first acceptable
 		 * ACK we have received, this would have acknowledged
@@ -5690,48 +5693,47 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			reqsk_fastopen_remove(sk, req, false);
 			tcp_rearm_rto(sk);
 		}
-		if (tp->snd_una == tp->write_seq) {
-			struct dst_entry *dst;
+		if (tp->snd_una != tp->write_seq)
+			break;
 
-			tcp_set_state(sk, TCP_FIN_WAIT2);
-			sk->sk_shutdown |= SEND_SHUTDOWN;
+		tcp_set_state(sk, TCP_FIN_WAIT2);
+		sk->sk_shutdown |= SEND_SHUTDOWN;
 
-			dst = __sk_dst_get(sk);
-			if (dst)
-				dst_confirm(dst);
+		dst = __sk_dst_get(sk);
+		if (dst)
+			dst_confirm(dst);
 
-			if (!sock_flag(sk, SOCK_DEAD)) {
-				/* Wake up lingering close() */
-				sk->sk_state_change(sk);
-			} else {
-				int tmo;
-
-				if (tp->linger2 < 0 ||
-				    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
-				     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
-					tcp_done(sk);
-					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
-					return 1;
-				}
+		if (!sock_flag(sk, SOCK_DEAD)) {
+			/* Wake up lingering close() */
+			sk->sk_state_change(sk);
+			break;
+		}
 
-				tmo = tcp_fin_time(sk);
-				if (tmo > TCP_TIMEWAIT_LEN) {
-					inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-				} else if (th->fin || sock_owned_by_user(sk)) {
-					/* Bad case. We could lose such FIN otherwise.
-					 * It is not a big problem, but it looks confusing
-					 * and not so rare event. We still can lose it now,
-					 * if it spins in bh_lock_sock(), but it is really
-					 * marginal case.
-					 */
-					inet_csk_reset_keepalive_timer(sk, tmo);
-				} else {
-					tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-					goto discard;
-				}
-			}
+		if (tp->linger2 < 0 ||
+		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+			tcp_done(sk);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+			return 1;
+		}
+
+		tmo = tcp_fin_time(sk);
+		if (tmo > TCP_TIMEWAIT_LEN) {
+			inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+		} else if (th->fin || sock_owned_by_user(sk)) {
+			/* Bad case. We could lose such FIN otherwise.
+			 * It is not a big problem, but it looks confusing
+			 * and not so rare event. We still can lose it now,
+			 * if it spins in bh_lock_sock(), but it is really
+			 * marginal case.
+			 */
+			inet_csk_reset_keepalive_timer(sk, tmo);
+		} else {
+			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+			goto discard;
 		}
 		break;
+	}
 
 	case TCP_CLOSING:
 		if (tp->snd_una == tp->write_seq) {
-- 
1.8.1.2.459.gbcd45b4.dirty

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH net-next 1/2] tcp: Remove another indentation level in tcp_rcv_state_process
  2013-05-25  4:02     ` [PATCH net-next 1/2] tcp: Remove another indentation level in tcp_rcv_state_process Joe Perches
  2013-05-25  4:06       ` [PATCH net-next 2/2] tcp: Remove 2 indentation levels " Joe Perches
@ 2013-05-25  4:24       ` Eric Dumazet
  2013-05-25  4:36         ` [PATCH net-next 1/2 V2] " Joe Perches
  1 sibling, 1 reply; 14+ messages in thread
From: Eric Dumazet @ 2013-05-25  4:24 UTC (permalink / raw)
  To: Joe Perches; +Cc: David Miller, netdev, Yuchung Cheng

On Fri, 2013-05-24 at 21:02 -0700, Joe Perches wrote:
> case TCP_SYN_RECV: can have an indentation level removed
> by converting:
> 
> 	if (acceptable) {
> 		[...];
> 	} else {
> 		return 1;
> 	}
> 
> to
> 	if (!acceptable)
> 		return 1;
> 	[...];
> 
> Reflow comments to fit 80 columns.
> 
> A neatening only patch.
> 
> Signed-off-by: Joe Perches <joe@perches.com>
> ---

> +		tp->snd_wnd = ntohs(th->window) <<
> +			tp->rx_opt.snd_wscale;

single line ?

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH net-next 1/2 V2] tcp: Remove another indentation level in tcp_rcv_state_process
  2013-05-25  4:24       ` [PATCH net-next 1/2] tcp: Remove another indentation level " Eric Dumazet
@ 2013-05-25  4:36         ` Joe Perches
  2013-05-26  6:23           ` David Miller
  0 siblings, 1 reply; 14+ messages in thread
From: Joe Perches @ 2013-05-25  4:36 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David Miller, netdev, Yuchung Cheng

case TCP_SYN_RECV: can have another indentation level removed
by converting

	if (acceptable) {
		...;
	} else {
		return 1;
	}

to
	if (!acceptable)
		return 1;
	...;

Reflow code and comments to fit 80 columns.

Another pure cleanup patch.

Signed-off-by: Joe Perches <joe@perches.com>
Improved-by: Eric Dumazet <eric.dumazet@gmail.com>
---

On Fri, 2013-05-24 at 21:24 -0700, Eric Dumazet wrote:
> > +		tp->snd_wnd = ntohs(th->window) <<
> > +			tp->rx_opt.snd_wscale;
> single line ?

Right.  Resubmitting, Thanks Eric.

 net/ipv4/tcp_input.c | 110 ++++++++++++++++++++++++---------------------------
 1 file changed, 51 insertions(+), 59 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4061425..413b480 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5612,70 +5612,62 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	switch (sk->sk_state) {
 	case TCP_SYN_RECV:
-		if (acceptable) {
-			/* Once we leave TCP_SYN_RECV, we no longer
-			 * need req so release it.
-			 */
-			if (req) {
-				tcp_synack_rtt_meas(sk, req);
-				tp->total_retrans = req->num_retrans;
+		if (!acceptable)
+			return 1;
 
-				reqsk_fastopen_remove(sk, req, false);
-			} else {
-				/* Make sure socket is routed, for
-				 * correct metrics.
-				 */
-				icsk->icsk_af_ops->rebuild_header(sk);
-				tcp_init_congestion_control(sk);
+		/* Once we leave TCP_SYN_RECV, we no longer need req
+		 * so release it.
+		 */
+		if (req) {
+			tcp_synack_rtt_meas(sk, req);
+			tp->total_retrans = req->num_retrans;
 
-				tcp_mtup_init(sk);
-				tcp_init_buffer_space(sk);
-				tp->copied_seq = tp->rcv_nxt;
-			}
-			smp_mb();
-			tcp_set_state(sk, TCP_ESTABLISHED);
-			sk->sk_state_change(sk);
-
-			/* Note, that this wakeup is only for marginal
-			 * crossed SYN case. Passively open sockets
-			 * are not waked up, because sk->sk_sleep ==
-			 * NULL and sk->sk_socket == NULL.
-			 */
-			if (sk->sk_socket)
-				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
-
-			tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-			tp->snd_wnd = ntohs(th->window) <<
-					tp->rx_opt.snd_wscale;
-			tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-			if (tp->rx_opt.tstamp_ok)
-				tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-			if (req) {
-				/* Re-arm the timer because data may
-				 * have been sent out. This is similar
-				 * to the regular data transmission case
-				 * when new data has just been ack'ed.
-				 *
-				 * (TFO) - we could try to be more aggressive
-				 * and retransmitting any data sooner based
-				 * on when they are sent out.
-				 */
-				tcp_rearm_rto(sk);
-			} else
-				tcp_init_metrics(sk);
+			reqsk_fastopen_remove(sk, req, false);
+		} else {
+			/* Make sure socket is routed, for correct metrics. */
+			icsk->icsk_af_ops->rebuild_header(sk);
+			tcp_init_congestion_control(sk);
+
+			tcp_mtup_init(sk);
+			tcp_init_buffer_space(sk);
+			tp->copied_seq = tp->rcv_nxt;
+		}
+		smp_mb();
+		tcp_set_state(sk, TCP_ESTABLISHED);
+		sk->sk_state_change(sk);
 
-			/* Prevent spurious tcp_cwnd_restart() on
-			 * first data packet.
+		/* Note, that this wakeup is only for marginal crossed SYN case.
+		 * Passively open sockets are not waked up, because
+		 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
+		 */
+		if (sk->sk_socket)
+			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
+		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+		if (tp->rx_opt.tstamp_ok)
+			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+		if (req) {
+			/* Re-arm the timer because data may have been sent out.
+			 * This is similar to the regular data transmission case
+			 * when new data has just been ack'ed.
+			 *
+			 * (TFO) - we could try to be more aggressive and
+			 * retransmitting any data sooner based on when they
+			 * are sent out.
 			 */
-			tp->lsndtime = tcp_time_stamp;
+			tcp_rearm_rto(sk);
+		} else
+			tcp_init_metrics(sk);
 
-			tcp_initialize_rcv_mss(sk);
-			tcp_fast_path_on(tp);
-		} else {
-			return 1;
-		}
+		/* Prevent spurious tcp_cwnd_restart() on first data packet */
+		tp->lsndtime = tcp_time_stamp;
+
+		tcp_initialize_rcv_mss(sk);
+		tcp_fast_path_on(tp);
 		break;
 
 	case TCP_FIN_WAIT1:
-- 
1.8.1.2.459.gbcd45b4.dirty

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 net-next] tcp: remove one indentation level in tcp_rcv_state_process()
  2013-05-25  1:03   ` [PATCH v3 " Eric Dumazet
  2013-05-25  4:02     ` [PATCH net-next 1/2] tcp: Remove another indentation level in tcp_rcv_state_process Joe Perches
@ 2013-05-26  6:23     ` David Miller
  1 sibling, 0 replies; 14+ messages in thread
From: David Miller @ 2013-05-26  6:23 UTC (permalink / raw)
  To: eric.dumazet; +Cc: netdev, ycheng

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Fri, 24 May 2013 18:03:54 -0700

> From: Eric Dumazet <edumazet@google.com>
> 
> Remove one level of indentation 'introduced' in commit
> c3ae62af8e75 (tcp: should drop incoming frames without ACK flag set)
> 
> if (true) {
>         ...
> }
> 
> @acceptable variable is a boolean.
> 
> This patch is a pure cleanup.
> 
> Signed-off-by: Eric Dumazet <edumazet@google.com>

Applied.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH net-next 1/2 V2] tcp: Remove another indentation level in tcp_rcv_state_process
  2013-05-25  4:36         ` [PATCH net-next 1/2 V2] " Joe Perches
@ 2013-05-26  6:23           ` David Miller
  0 siblings, 0 replies; 14+ messages in thread
From: David Miller @ 2013-05-26  6:23 UTC (permalink / raw)
  To: joe; +Cc: eric.dumazet, netdev, ycheng

From: Joe Perches <joe@perches.com>
Date: Fri, 24 May 2013 21:36:13 -0700

> case TCP_SYN_RECV: can have another indentation level removed
> by converting
> 
> 	if (acceptable) {
> 		...;
> 	} else {
> 		return 1;
> 	}
> 
> to
> 	if (!acceptable)
> 		return 1;
> 	...;
> 
> Reflow code and comments to fit 80 columns.
> 
> Another pure cleanup patch.
> 
> Signed-off-by: Joe Perches <joe@perches.com>
> Improved-by: Eric Dumazet <eric.dumazet@gmail.com>

Applied.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH net-next 2/2] tcp: Remove 2 indentation levels in tcp_rcv_state_process
  2013-05-25  4:06       ` [PATCH net-next 2/2] tcp: Remove 2 indentation levels " Joe Perches
@ 2013-05-26  6:23         ` David Miller
  0 siblings, 0 replies; 14+ messages in thread
From: David Miller @ 2013-05-26  6:23 UTC (permalink / raw)
  To: joe; +Cc: eric.dumazet, netdev, ycheng

From: Joe Perches <joe@perches.com>
Date: Fri, 24 May 2013 21:06:58 -0700

> case TCP_FIN_WAIT1 can also be simplified by reversing tests
> and adding breaks;
> 
> Add braces after case and move automatic definitions.
> 
> Signed-off-by: Joe Perches <joe@perches.com>

Applied.

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2013-05-26  6:23 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-04-19  3:16 [PATCH net-next] tcp: remove one indentation level in tcp_rcv_state_process() Eric Dumazet
2013-04-19 17:04 ` Eric Dumazet
2013-04-19 18:22   ` David Miller
2013-05-23 15:55 ` [PATCH v2 " Eric Dumazet
2013-05-23 17:09   ` Joe Perches
2013-05-23 17:12     ` Eric Dumazet
2013-05-25  1:03   ` [PATCH v3 " Eric Dumazet
2013-05-25  4:02     ` [PATCH net-next 1/2] tcp: Remove another indentation level in tcp_rcv_state_process Joe Perches
2013-05-25  4:06       ` [PATCH net-next 2/2] tcp: Remove 2 indentation levels " Joe Perches
2013-05-26  6:23         ` David Miller
2013-05-25  4:24       ` [PATCH net-next 1/2] tcp: Remove another indentation level " Eric Dumazet
2013-05-25  4:36         ` [PATCH net-next 1/2 V2] " Joe Perches
2013-05-26  6:23           ` David Miller
2013-05-26  6:23     ` [PATCH v3 net-next] tcp: remove one indentation level in tcp_rcv_state_process() David Miller

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).