* [PATCH net-next 1/2] packet: reorder a member in packet_ring_buffer
@ 2013-04-19 16:12 Daniel Borkmann
  2013-04-19 16:12 ` [PATCH net-next 2/2] packet: account statistics only in tpacket_stats_u Daniel Borkmann
  2013-04-25  5:29 ` [PATCH net-next 1/2] packet: reorder a member in packet_ring_buffer David Miller
  0 siblings, 2 replies; 4+ messages in thread
From: Daniel Borkmann @ 2013-04-19 16:12 UTC (permalink / raw)
  To: davem; +Cc: netdev

There's a 4 byte hole in the packet_ring_buffer structure before
prb_bdqc that can be filled with the 'pending' member, reducing the
overall structure size from 224 bytes to 216 bytes. As a side effect,
the two 4 byte holes in struct packet_sock after the embedded
packet_ring_buffer members disappear as well, and packet_sock shrinks
by one cacheline:

Before: size: 1344, cachelines: 21, members: 24
After:  size: 1280, cachelines: 20, members: 24
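
As a minimal standalone illustration (an editor's sketch, not the
kernel structure itself), moving a 4 byte member into the hole ahead
of an 8 byte aligned member removes both the hole and the tail
padding; the sizes assume a typical LP64 ABI:

  #include <stdio.h>

  struct before_reorder {                 /* analogous to the old layout */
          unsigned int pg_vec_len;        /* 4 bytes                     */
                                          /* 4 byte hole (alignment)     */
          long long big_member;           /* stands in for prb_bdqc      */
          int pending;                    /* 4 bytes + 4 bytes tail pad  */
  };

  struct after_reorder {                  /* analogous to the new layout */
          unsigned int pg_vec_len;        /* 4 bytes                     */
          int pending;                    /* fills the former hole       */
          long long big_member;           /* stands in for prb_bdqc      */
  };

  int main(void)
  {
          /* typically prints "before: 24, after: 16" */
          printf("before: %zu, after: %zu\n",
                 sizeof(struct before_reorder), sizeof(struct after_reorder));
          return 0;
  }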

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
---
 net/packet/internal.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/net/packet/internal.h b/net/packet/internal.h
index e891f02..650751b 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -54,6 +54,7 @@ struct pgv {
 
 struct packet_ring_buffer {
 	struct pgv		*pg_vec;
+
 	unsigned int		head;
 	unsigned int		frames_per_block;
 	unsigned int		frame_size;
@@ -63,8 +64,9 @@ struct packet_ring_buffer {
 	unsigned int		pg_vec_pages;
 	unsigned int		pg_vec_len;
 
-	struct tpacket_kbdq_core	prb_bdqc;
 	atomic_t		pending;
+
+	struct tpacket_kbdq_core	prb_bdqc;
 };
 
 extern struct mutex fanout_mutex;
-- 
1.7.11.7


* [PATCH net-next 2/2] packet: account statistics only in tpacket_stats_u
  2013-04-19 16:12 [PATCH net-next 1/2] packet: reorder a member in packet_ring_buffer Daniel Borkmann
@ 2013-04-19 16:12 ` Daniel Borkmann
  2013-04-25  5:30   ` David Miller
  2013-04-25  5:29 ` [PATCH net-next 1/2] packet: reorder a member in packet_ring_buffer David Miller
  1 sibling, 1 reply; 4+ messages in thread
From: Daniel Borkmann @ 2013-04-19 16:12 UTC (permalink / raw)
  To: davem; +Cc: netdev

Currently, packet_sock has a struct tpacket_stats stats member for
TPACKET_V1 and TPACKET_V2 statistics accounting, and TPACKET_V3
introduced a separate ``union tpacket_stats_u stats_u'' that holds
only the TPACKET_V3 statistics. When copying to user space, TPACKET_V3
therefore has to reach into the tpacket_stats counters as well,
although everything could have been kept within the union itself.

Unify accounting within the tpacket_stats_u union so that we can
remove 8 unnecessary bytes from packet_sock. Note that this still
works even if we switch to TPACKET_V3 and use the non-mmap(2)ed
path, since the union members share the same types and offsets that
are exposed to user space.
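
For reference, the user space facing types involved look roughly like
this (paraphrased from include/uapi/linux/if_packet.h of that time):

  struct tpacket_stats {                  /* TPACKET_V1 / TPACKET_V2 */
          unsigned int    tp_packets;
          unsigned int    tp_drops;
  };

  struct tpacket_stats_v3 {               /* TPACKET_V3 */
          unsigned int    tp_packets;
          unsigned int    tp_drops;
          unsigned int    tp_freeze_q_cnt;
  };

  union tpacket_stats_u {
          struct tpacket_stats    stats1;
          struct tpacket_stats_v3 stats3;
  };

Since stats1 and stats3 start with the same two unsigned int counters,
tp_packets and tp_drops sit at identical offsets in both union members,
which is why a single union is sufficient for all ring versions. A user
space reader (hypothetical sketch, error handling omitted, 'fd' assumed
to be an AF_PACKET socket set to TPACKET_V3) would then retrieve and
implicitly reset the counters like this:

  #include <stdio.h>
  #include <sys/socket.h>
  #include <linux/if_packet.h>

  static void print_stats(int fd)
  {
          struct tpacket_stats_v3 st;
          socklen_t len = sizeof(st);

          if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
                  printf("packets=%u drops=%u queue freezes=%u\n",
                         st.tp_packets, st.tp_drops, st.tp_freeze_q_cnt);
  }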

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
---
 net/packet/af_packet.c | 37 ++++++++++++++++---------------------
 net/packet/internal.h  |  3 +--
 2 files changed, 17 insertions(+), 23 deletions(-)

diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7e387ff..3d8c017 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -475,7 +475,7 @@ static void init_prb_bdqc(struct packet_sock *po,
 	p1->hdrlen = po->tp_hdrlen;
 	p1->version = po->tp_version;
 	p1->last_kactive_blk_num = 0;
-	po->stats_u.stats3.tp_freeze_q_cnt = 0;
+	po->stats.stats3.tp_freeze_q_cnt = 0;
 	if (req_u->req3.tp_retire_blk_tov)
 		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 	else
@@ -643,7 +643,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 	struct tpacket3_hdr *last_pkt;
 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 
-	if (po->stats.tp_drops)
+	if (po->stats.stats3.tp_drops)
 		status |= TP_STATUS_LOSING;
 
 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
@@ -748,7 +748,7 @@ static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 				  struct packet_sock *po)
 {
 	pkc->reset_pending_on_curr_blk = 1;
-	po->stats_u.stats3.tp_freeze_q_cnt++;
+	po->stats.stats3.tp_freeze_q_cnt++;
 }
 
 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
@@ -1634,7 +1634,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	nf_reset(skb);
 
 	spin_lock(&sk->sk_receive_queue.lock);
-	po->stats.tp_packets++;
+	po->stats.stats1.tp_packets++;
 	skb->dropcount = atomic_read(&sk->sk_drops);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
@@ -1643,7 +1643,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
-	po->stats.tp_drops++;
+	po->stats.stats1.tp_drops++;
 	atomic_inc(&sk->sk_drops);
 	spin_unlock(&sk->sk_receive_queue.lock);
 
@@ -1762,10 +1762,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
 	 * at packet level.
 	 */
-		if (po->stats.tp_drops)
+		if (po->stats.stats1.tp_drops)
 			status |= TP_STATUS_LOSING;
 	}
-	po->stats.tp_packets++;
+	po->stats.stats1.tp_packets++;
 	if (copy_skb) {
 		status |= TP_STATUS_COPY;
 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
@@ -1860,7 +1860,7 @@ drop:
 	return 0;
 
 ring_is_full:
-	po->stats.tp_drops++;
+	po->stats.stats1.tp_drops++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -3204,8 +3204,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	struct sock *sk = sock->sk;
 	struct packet_sock *po = pkt_sk(sk);
 	void *data = &val;
-	struct tpacket_stats st;
-	union tpacket_stats_u st_u;
+	union tpacket_stats_u st;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -3219,22 +3218,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	switch (optname) {
 	case PACKET_STATISTICS:
 		spin_lock_bh(&sk->sk_receive_queue.lock);
+		memcpy(&st, &po->stats, sizeof(st));
+		memset(&po->stats, 0, sizeof(po->stats));
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
+
 		if (po->tp_version == TPACKET_V3) {
 			lv = sizeof(struct tpacket_stats_v3);
-			memcpy(&st_u.stats3, &po->stats,
-			       sizeof(struct tpacket_stats));
-			st_u.stats3.tp_freeze_q_cnt =
-					po->stats_u.stats3.tp_freeze_q_cnt;
-			st_u.stats3.tp_packets += po->stats.tp_drops;
-			data = &st_u.stats3;
+			data = &st.stats3;
 		} else {
 			lv = sizeof(struct tpacket_stats);
-			st = po->stats;
-			st.tp_packets += st.tp_drops;
-			data = &st;
+			data = &st.stats1;
 		}
-		memset(&po->stats, 0, sizeof(st));
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
+
 		break;
 	case PACKET_AUXDATA:
 		val = po->auxdata;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 650751b..c4e4b45 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -93,8 +93,7 @@ struct packet_sock {
 	/* struct sock has to be the first member of packet_sock */
 	struct sock		sk;
 	struct packet_fanout	*fanout;
-	struct tpacket_stats	stats;
-	union  tpacket_stats_u	stats_u;
+	union  tpacket_stats_u	stats;
 	struct packet_ring_buffer	rx_ring;
 	struct packet_ring_buffer	tx_ring;
 	int			copy_thresh;
-- 
1.7.11.7


* Re: [PATCH net-next 1/2] packet: reorder a member in packet_ring_buffer
  2013-04-19 16:12 [PATCH net-next 1/2] packet: reorder a member in packet_ring_buffer Daniel Borkmann
  2013-04-19 16:12 ` [PATCH net-next 2/2] packet: account statistics only in tpacket_stats_u Daniel Borkmann
@ 2013-04-25  5:29 ` David Miller
  1 sibling, 0 replies; 4+ messages in thread
From: David Miller @ 2013-04-25  5:29 UTC (permalink / raw)
  To: dborkman; +Cc: netdev

From: Daniel Borkmann <dborkman@redhat.com>
Date: Fri, 19 Apr 2013 18:12:28 +0200

> There's a 4 byte hole in the packet_ring_buffer structure before
> prb_bdqc that can be filled with the 'pending' member, reducing the
> overall structure size from 224 bytes to 216 bytes. As a side effect,
> the two 4 byte holes in struct packet_sock after the embedded
> packet_ring_buffer members disappear as well, and packet_sock shrinks
> by one cacheline:
> 
> Before: size: 1344, cachelines: 21, members: 24
> After:  size: 1280, cachelines: 20, members: 24
> 
> Signed-off-by: Daniel Borkmann <dborkman@redhat.com>

Applied.


* Re: [PATCH net-next 2/2] packet: account statistics only in tpacket_stats_u
  2013-04-19 16:12 ` [PATCH net-next 2/2] packet: account statistics only in tpacket_stats_u Daniel Borkmann
@ 2013-04-25  5:30   ` David Miller
  0 siblings, 0 replies; 4+ messages in thread
From: David Miller @ 2013-04-25  5:30 UTC (permalink / raw)
  To: dborkman; +Cc: netdev

From: Daniel Borkmann <dborkman@redhat.com>
Date: Fri, 19 Apr 2013 18:12:29 +0200

> Currently, packet_sock has a struct tpacket_stats stats member for
> TPACKET_V1 and TPACKET_V2 statistics accounting, and TPACKET_V3
> introduced a separate ``union tpacket_stats_u stats_u'' that holds
> only the TPACKET_V3 statistics. When copying to user space, TPACKET_V3
> therefore has to reach into the tpacket_stats counters as well,
> although everything could have been kept within the union itself.
> 
> Unify accounting within the tpacket_stats_u union so that we can
> remove 8 unnecessary bytes from packet_sock. Note that this still
> works even if we switch to TPACKET_V3 and use the non-mmap(2)ed
> path, since the union members share the same types and offsets that
> are exposed to user space.
> 
> Signed-off-by: Daniel Borkmann <dborkman@redhat.com>

Applied.

