netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* resurrecting tcphealth
@ 2012-07-12 20:55 Piotr Sawuk
  2012-07-12 21:35 ` Stephen Hemminger
                   ` (2 more replies)
  0 siblings, 3 replies; 24+ messages in thread
From: Piotr Sawuk @ 2012-07-12 20:55 UTC (permalink / raw)
  To: netdev; +Cc: linux-kernel

hello! I haven't done any kernel-hacking before, so be patient.

I got as far as to make tcphealth run, but now I need some help:
how does read-locking work in the tcp_sock struct?
the original code (for 2.5.1) made a read_lock(&head->lock) with
struct tcp_ehash_bucket *head = &tcp_ehash[i];
at the beginning of the for-loop over all sk=head->chain.
i.e.
       for (i=0; i < tcp_ehash_size; i++) {
               struct tcp_ehash_bucket *head = &tcp_ehash[i];
               struct sock *sk;
               struct tcp_opt *tp;

               read_lock(&head->lock);
               for (sk=head->chain; sk; sk=sk->next) {
                       if (!TCP_INET_FAMILY(sk->family))
                               continue;

and in the new kernel this construction has been replaced by

        if (st->state==TCP_SEQ_STATE_ESTABLISHED)
...
static struct tcp_seq_afinfo tcphealth_seq_afinfo
...

and the example with proc/net/tcp is seemingly not locking anything. do I
actually need a read-lock for diagnostic data from tcp_sock? why did the
original author need to lock the whole chain of tcp_sock?

here's my patch against 3.4.4 so far, any further comments welcome:

diff -rub linux-3.4.4/include/linux/tcp.h
linux-3.4.4-heal-lsm/include/linux/tcp.h
--- linux-3.4.4/include/linux/tcp.h	2012-06-22 20:37:50.000000000 +0200
+++ linux-3.4.4-heal-lsm/include/linux/tcp.h	2012-07-06 10:23:13.000000000
+0200
@@ -7,6 +7,8 @@
  *
  * Version:	@(#)tcp.h	1.0.2	04/28/93
  *
+ *      Federico D. Sacerdoti	:	Added TCP health counters.
+ *
  * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *
  *		This program is free software; you can redistribute it and/or
@@ -472,6 +474,15 @@
 	 * contains related tcp_cookie_transactions fields.
 	 */
 	struct tcp_cookie_values  *cookie_values;
+
+	/*
+	 * TCP health monitoring counters.
+	 */
+	__u32	dup_acks_sent;
+	__u32	dup_pkts_recv;
+	__u32	acks_sent;
+	__u32	pkts_recv;
+		__u32	last_ack_sent;	/* Sequence number of the last ack we sent. */
 };

 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
diff -rub linux-3.4.4/net/ipv4/tcp_input.c
linux-3.4.4-heal-lsm/net/ipv4/tcp_input.c
--- linux-3.4.4/net/ipv4/tcp_input.c	2012-06-22 20:37:50.000000000 +0200
+++ linux-3.4.4-heal-lsm/net/ipv4/tcp_input.c	2012-07-06
10:12:12.000000000 +0200
@@ -59,7 +59,8 @@
  *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
  *					engine. Lots of bugs are found.
  *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
- */
+ *		Federico D. Sacerdoti:	Added TCP health monitoring.
+*/

 #define pr_fmt(fmt) "TCP: " fmt

@@ -4414,6 +4415,8 @@
 		}

 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+			/* Course retransmit inefficiency- this packet has been received
twice. */
+			tp->dup_pkts_recv++;
 			SOCK_DEBUG(sk, "ofo packet was already received\n");
 			__skb_unlink(skb, &tp->out_of_order_queue);
 			__kfree_skb(skb);
@@ -4664,6 +4667,10 @@
 		return;
 	}

+	/* A packet is a "duplicate" if it contains bytes we have already
received. */
+	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
+		tp->dup_pkts_recv++;
+
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case.  Force an immediate ack. */
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
@@ -5375,6 +5382,14 @@

 	tp->rx_opt.saw_tstamp = 0;

+	/*
+	 *	Tcp health monitoring is interested in
+	 *	total per-connection packet arrivals.
+	 *	This is in the fast path, but is quick.
+	 */
+
+	tp->pkts_recv++;
+
 	/*	pred_flags is 0xS?10 << 16 + snd_wnd
 	 *	if header_prediction is to be made
 	 *	'S' will always be tp->tcp_header_len >> 2
diff -rub linux-3.4.4/net/ipv4/tcp_ipv4.c
linux-3.4.4-heal-lsm/net/ipv4/tcp_ipv4.c
--- linux-3.4.4/net/ipv4/tcp_ipv4.c	2012-06-22 20:37:50.000000000 +0200
+++ linux-3.4.4-heal-lsm/net/ipv4/tcp_ipv4.c	2012-07-11 09:34:22.000000000
+0200
@@ -2533,6 +2533,82 @@
 	return 0;
 }

+
+/*
+ *	Output /proc/net/tcphealth
+ */
+#define LINESZ 128
+
+int tcp_health_seq_show(struct seq_file *seq, void *v)
+{
+	int len, num;
+       char srcIP[32], destIP[32];
+
+	unsigned long  SmoothedRttEstimate,
+		AcksSent, DupAcksSent, PktsRecv, DupPktsRecv;
+	struct tcp_iter_state *st;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(seq,
+		"TCP Health Monitoring (established connections only)\n"
+		" -Duplicate ACKs indicate lost or reordered packets on the connection.\n"
+		" -Duplicate Packets Received signal a slow and badly inefficient
connection.\n"
+		" -RttEst estimates how long future packets will take on a round trip
over the connection.\n"
+		"id   Local Address        Remote Address       RttEst(ms) AcksSent "
+		"DupAcksSent PktsRecv DupPktsRecv\n");
+		goto out;
+	}
+
+	/* Loop through established TCP connections */
+	st = seq->private;
+
+
+	if (st->state==TCP_SEQ_STATE_ESTABLISHED)
+	{
+/*	; //insert read-lock here */
+		const struct tcp_sock *tp = tcp_sk(v);
+		const struct inet_sock *inet = inet_sk(v);
+		__be32 dest = inet->inet_daddr;
+		__be32 src = inet->inet_rcv_saddr;
+		__u16 destp = ntohs(inet->inet_dport);
+		__u16 srcp = ntohs(inet->inet_sport);
+
+		num=st->num;
+		SmoothedRttEstimate = (tp->srtt >> 3);
+		AcksSent = tp->acks_sent;
+		DupAcksSent = tp->dup_acks_sent;
+		PktsRecv = tp->pkts_recv;
+		DupPktsRecv = tp->dup_pkts_recv;
+
+		sprintf(srcIP, "%lu.%lu.%lu.%lu:%u",
+			((src >> 24) & 0xFF), ((src >> 16) & 0xFF), ((src >> 8) & 0xFF), (src
& 0xFF),
+			srcp);
+		sprintf(destIP, "%3d.%3d.%3d.%3d:%u",
+			((dest >> 24) & 0xFF), ((dest >> 16) & 0xFF), ((dest >> 8) & 0xFF),
(dest & 0xFF),
+			destp);
+
+		seq_printf(seq, "%d: %-21s %-21s "
+				"%8lu %8lu %8lu %8lu %8lu%n",
+				num,
+				srcIP,
+				destIP,
+				SmoothedRttEstimate,
+				AcksSent,
+				DupAcksSent,
+				PktsRecv,
+				DupPktsRecv,
+
+				&len
+			);
+
+		seq_printf(seq, "%*s\n", LINESZ - 1 - len, "");
+/*	; //insert read-unlock here */
+	}
+
+out:
+	return 0;
+}
+
 static const struct file_operations tcp_afinfo_seq_fops = {
 	.owner   = THIS_MODULE,
 	.open    = tcp_seq_open,
@@ -2541,6 +2617,15 @@
 	.release = seq_release_net
 };

+static struct tcp_seq_afinfo tcphealth_seq_afinfo = {
+	.name		= "tcphealth",
+	.family		= AF_INET,
+	.seq_fops	= &tcp_afinfo_seq_fops,
+	.seq_ops	= {
+		.show		= tcp_health_seq_show,
+	},
+};
+
 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
 	.name		= "tcp",
 	.family		= AF_INET,
@@ -2552,12 +2637,15 @@

 static int __net_init tcp4_proc_init_net(struct net *net)
 {
-	return tcp_proc_register(net, &tcp4_seq_afinfo);
+	int ret=tcp_proc_register(net, &tcp4_seq_afinfo);
+	if(ret==0) ret=tcp_proc_register(net, &tcphealth_seq_afinfo);
+	return ret;
 }

 static void __net_exit tcp4_proc_exit_net(struct net *net)
 {
 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
+	tcp_proc_unregister(net, &tcphealth_seq_afinfo);
 }

 static struct pernet_operations tcp4_net_ops = {
diff -rub linux-3.4.4/net/ipv4/tcp_output.c
linux-3.4.4-heal-lsm/net/ipv4/tcp_output.c
--- linux-3.4.4/net/ipv4/tcp_output.c	2012-06-22 20:37:50.000000000 +0200
+++ linux-3.4.4-heal-lsm/net/ipv4/tcp_output.c	2012-07-06
17:15:14.000000000 +0200
@@ -31,6 +31,7 @@
  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
  *		Cacophonix Gaul :	draft-minshall-nagle-01
  *		J Hadi Salim	:	ECN support
+ *	Federico D. Sacerdoti	:	Added TCP health monitoring.
  *
  */

@@ -2754,8 +2755,15 @@
 	skb_reserve(buff, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

+		/* If the rcv_nxt has not advanced since sending our last ACK, this is
a duplicate. */
+		if (tcp_sk(sk)->rcv_nxt == tcp_sk(sk)->last_ack_sent)
+			tcp_sk(sk)->dup_acks_sent++;
+		/* Record the total number of acks sent on this connection. */
+		tcp_sk(sk)->acks_sent++;
+
 	/* Send it off, this clears delayed acks for us. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
+		tcp_sk(sk)->last_ack_sent = tcp_sk(sk)->rcv_nxt;
 	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
 }

^ permalink raw reply	[flat|nested] 24+ messages in thread
* Re: resurrecting tcphealth
@ 2012-07-13  7:33 Piotr Sawuk
  2012-07-13 23:55 ` Stephen Hemminger
  0 siblings, 1 reply; 24+ messages in thread
From: Piotr Sawuk @ 2012-07-13  7:33 UTC (permalink / raw)
  To: netdev; +Cc: linux-kernel

On Do, 12.07.2012, 23:35, Stephen Hemminger wrote:
> On Thu, 12 Jul 2012 22:55:57 +0200
> "Piotr Sawuk" <a9702387@unet.univie.ac.at> wrote:
>
>> + *		Federico D. Sacerdoti:	Added TCP health monitoring.
>
> Please don't do this.
> The kernel community no longer maintains a list of contributors
> in the comments. The history is maintained in the git commit log.
>

thanks for the proof-reading, to Randy Dunlap too. now I have tested the
patch against mainline.

so, anyone has a comment on my actual question about the need for a read-lock?

currently my patch looks like this (again comments are welcome):

diff -rub A/include/linux/tcp.h B/include/linux/tcp.h
--- A/include/linux/tcp.h	2012-06-22 20:37:50.000000000 +0200
+++ B/include/linux/tcp.h	2012-07-06 10:23:13.000000000 +0200
@@ -472,6 +474,15 @@
 	 * contains related tcp_cookie_transactions fields.
 	 */
 	struct tcp_cookie_values  *cookie_values;
+
+	/*
+	 * TCP health monitoring counters.
+	 */
+	__u32	dup_acks_sent;
+	__u32	dup_pkts_recv;
+	__u32	acks_sent;
+	__u32	pkts_recv;
+	__u32	last_ack_sent;	/* Sequence number of the last ack we sent. */
 };

 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
diff -rub A/net/ipv4/tcp_input.c B/net/ipv4/tcp_input.c
--- A/net/ipv4/tcp_input.c	2012-06-22 20:37:50.000000000 +0200
+++ B/net/ipv4/tcp_input.c	2012-07-06 10:12:12.000000000 +0200
@@ -4414,6 +4415,8 @@
 		}

 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+			/* Course retransmit inefficiency- this packet has been received twice. */
+			tp->dup_pkts_recv++;
 			SOCK_DEBUG(sk, "ofo packet was already received\n");
 			__skb_unlink(skb, &tp->out_of_order_queue);
 			__kfree_skb(skb);
@@ -4664,6 +4667,10 @@
 		return;
 	}

+	/* A packet is a "duplicate" if it contains bytes we have already
received. */
+	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
+		tp->dup_pkts_recv++;
+
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case.  Force an immediate ack. */
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
@@ -5375,6 +5382,13 @@

 	tp->rx_opt.saw_tstamp = 0;

+	/*
+	 *	Tcp health monitoring is interested in
+	 *	total per-connection packet arrivals.
+	 *	This is in the fast path, but is quick.
+	 */
+	tp->pkts_recv++;
+
 	/*	pred_flags is 0xS?10 << 16 + snd_wnd
 	 *	if header_prediction is to be made
 	 *	'S' will always be tp->tcp_header_len >> 2
diff -rub A/net/ipv4/tcp_ipv4.c B/net/ipv4/tcp_ipv4.c
--- A/net/ipv4/tcp_ipv4.c	2012-06-22 20:37:50.000000000 +0200
+++ B/net/ipv4/tcp_ipv4.c	2012-07-11 09:34:22.000000000 +0200
@@ -2533,6 +2533,82 @@
 	return 0;
 }

+
+/*
+ *	Output /proc/net/tcphealth
+ */
+#define LINESZ 128
+
+int tcp_health_seq_show(struct seq_file *seq, void *v)
+{
+	int len, num;
+	char srcIP[32], destIP[32];
+
+	unsigned long  SmoothedRttEstimate,
+		AcksSent, DupAcksSent, PktsRecv, DupPktsRecv;
+	struct tcp_iter_state *st;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(seq,
+		"TCP Health Monitoring (established connections only)\n"
+		" -Duplicate ACKs indicate lost or reordered packets on the
connection.\n"
+		" -Duplicate Packets Received signal a slow and badly inefficient
connection.\n"
+		" -RttEst estimates how long future packets will take on a round trip
over the connection.\n"
+		"id   Local Address        Remote Address       RttEst(ms) AcksSent "
+		"DupAcksSent PktsRecv DupPktsRecv\n");
+		goto out;
+	}
+
+	/* Loop through established TCP connections */
+	st = seq->private;
+
+
+	if (st->state == TCP_SEQ_STATE_ESTABLISHED)
+	{
+/*	; //insert read-lock here */
+		const struct tcp_sock *tp = tcp_sk(v);
+		const struct inet_sock *inet = inet_sk(v);
+		__be32 dest = inet->inet_daddr;
+		__be32 src = inet->inet_rcv_saddr;
+		__u16 destp = ntohs(inet->inet_dport);
+		__u16 srcp = ntohs(inet->inet_sport);
+
+		num = st->num;
+		SmoothedRttEstimate = (tp->srtt >> 3);
+		AcksSent = tp->acks_sent;
+		DupAcksSent = tp->dup_acks_sent;
+		PktsRecv = tp->pkts_recv;
+		DupPktsRecv = tp->dup_pkts_recv;
+
+		sprintf(srcIP, "%lu.%lu.%lu.%lu:%u",
+			((src >> 24) & 0xFF), ((src >> 16) & 0xFF), ((src >> 8) & 0xFF), (src &
0xFF),
+			srcp);
+		sprintf(destIP, "%3d.%3d.%3d.%3d:%u",
+			((dest >> 24) & 0xFF), ((dest >> 16) & 0xFF), ((dest >> 8) & 0xFF),
(dest & 0xFF),
+			destp);
+
+		seq_printf(seq, "%d: %-21s %-21s "
+				"%8lu %8lu %8lu %8lu %8lu%n",
+				num,
+				srcIP,
+				destIP,
+				SmoothedRttEstimate,
+				AcksSent,
+				DupAcksSent,
+				PktsRecv,
+				DupPktsRecv,
+
+				&len
+			);
+
+		seq_printf(seq, "%*s\n", LINESZ - 1 - len, "");
+/*	; //insert read-unlock here */
+	}
+
+out:
+	return 0;
+}
+
 static const struct file_operations tcp_afinfo_seq_fops = {
 	.owner   = THIS_MODULE,
 	.open    = tcp_seq_open,
@@ -2541,6 +2617,15 @@
 	.release = seq_release_net
 };

+static struct tcp_seq_afinfo tcphealth_seq_afinfo = {
+	.name		= "tcphealth",
+	.family		= AF_INET,
+	.seq_fops	= &tcp_afinfo_seq_fops,
+	.seq_ops	= {
+		.show		= tcp_health_seq_show,
+	},
+};
+
 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
 	.name		= "tcp",
 	.family		= AF_INET,
@@ -2552,12 +2637,16 @@

 static int __net_init tcp4_proc_init_net(struct net *net)
 {
-	return tcp_proc_register(net, &tcp4_seq_afinfo);
+	int ret = tcp_proc_register(net, &tcp4_seq_afinfo);
+	if(ret == 0)
+		ret = tcp_proc_register(net, &tcphealth_seq_afinfo);
+	return ret;
 }

 static void __net_exit tcp4_proc_exit_net(struct net *net)
 {
 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
+	tcp_proc_unregister(net, &tcphealth_seq_afinfo);
 }

 static struct pernet_operations tcp4_net_ops = {
diff -rub A/net/ipv4/tcp_output.c B/net/ipv4/tcp_output.c
--- A/net/ipv4/tcp_output.c	2012-06-22 20:37:50.000000000 +0200
+++ B/net/ipv4/tcp_output.c	2012-07-06 17:15:14.000000000 +0200
@@ -2754,8 +2755,15 @@
 	skb_reserve(buff, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

+		/* If the rcv_nxt has not advanced since sending our last ACK, this is a
duplicate. */
+		if (tcp_sk(sk)->rcv_nxt == tcp_sk(sk)->last_ack_sent)
+			tcp_sk(sk)->dup_acks_sent++;
+		/* Record the total number of acks sent on this connection. */
+		tcp_sk(sk)->acks_sent++;
+
 	/* Send it off, this clears delayed acks for us. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
+	tcp_sk(sk)->last_ack_sent = tcp_sk(sk)->rcv_nxt;
 	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
 }

^ permalink raw reply	[flat|nested] 24+ messages in thread
* Re: resurrecting tcphealth
@ 2012-07-14  7:56 Piotr Sawuk
  2012-07-14  8:27 ` Eric Dumazet
  2012-07-16 13:32 ` Ben Hutchings
  0 siblings, 2 replies; 24+ messages in thread
From: Piotr Sawuk @ 2012-07-14  7:56 UTC (permalink / raw)
  To: netdev; +Cc: linux-kernel

On Sa, 14.07.2012, 03:31, valdis.kletnieks@vt.edu wrote:
> On Fri, 13 Jul 2012 16:55:44 -0700, Stephen Hemminger said:
>
>> >+			/* Course retransmit inefficiency- this packet has been received
>> twice. */
>> >+			tp->dup_pkts_recv++;
>> I don't understand that comment, could you use a better sentence please?
>
> I think what was intended was:
>
> /* Curse you, retransmit inefficiency! This packet has been received at
least twice */
>

LOL, no. I think "course retransmit" is short for "course-grained timeout
caused retransmit" but I can't be sure since I'm not the author of these
lines. I'll replace that comment with the non-shorthand version though.
however, I think the real comment here should be:

/*A perceived shortcoming of the standard TCP implementation: A
TCP receiver can get duplicate packets from the sender because it cannot
acknowledge packets that arrive out of order. These duplicates would happen
when the sender mistakenly thinks some packets have been lost by the network
because it does not receive acks for them but in reality they were
successfully received out of order. Since the receiver has no way of letting
the sender know about the receipt of these packets, they could potentially
be re-sent and re-received at the receiver. Not only do duplicate packets
waste precious Internet bandwidth but they hurt performance because the
sender mistakenly detects congestion from packet losses. The SACK TCP
extension specifically addresses this issue. A large number of duplicate
packets received would indicate a significant benefit to the wide adoption of
SACK. The duplicate-packets-received metric is computed at the
receiver and counts these packets on a per-connection basis.*/

as copied from his thesis at [1]. also in the thesis he writes:

In our limited experiment, the results indicated no duplicate packets were
received on any connection in the 18 hour run. This leads us to several
conclusions. Since duplicate ACKs were seen on many connections we know that
some packets were lost or reordered, but unACKed reordered packets never
caused a /course-grained timeout/ on our connections. Only these timeouts
will cause duplicate packets to be received since less severe out-of-order
conditions will be resolved with fast retransmits. The lack of course
timeouts
may be due to the quality of UCSD's ActiveWeb network or the paucity of
large gaps between received packet groups. It should be noted that Linux 2.2
implements fast retransmits for up to two packet gaps, thus reducing the
need for course grained timeouts due to the lack of SACK.

[1] https://sacerdoti.org/tcphealth/tcphealth-paper.pdf

^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2012-07-21 10:34 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-07-12 20:55 resurrecting tcphealth Piotr Sawuk
2012-07-12 21:35 ` Stephen Hemminger
2012-07-12 22:29 ` Randy Dunlap
2012-07-14 21:48 ` Stephen Hemminger
2012-07-14 23:43   ` Piotr Sawuk
2012-07-15  7:16     ` Eric Dumazet
2012-07-15  9:17       ` Piotr Sawuk
2012-07-15  9:53         ` Eric Dumazet
2012-07-15 22:17           ` Piotr Sawuk
  -- strict thread matches above, loose matches on Subject: below --
2012-07-13  7:33 Piotr Sawuk
2012-07-13 23:55 ` Stephen Hemminger
2012-07-14  1:31   ` valdis.kletnieks
2012-07-16 11:33   ` Piotr Sawuk
2012-07-16 11:46     ` Eric Dumazet
2012-07-16 13:03       ` Piotr Sawuk
2012-07-20 14:06         ` Yuchung Cheng
2012-07-21 10:34           ` Piotr Sawuk
2012-07-14  7:56 Piotr Sawuk
2012-07-14  8:27 ` Eric Dumazet
2012-07-14 19:29   ` David Miller
2012-07-16 13:32 ` Ben Hutchings
2012-07-16 15:12   ` Piotr Sawuk
2012-07-16 15:24     ` Christoph Paasch
2012-07-19 10:37       ` Piotr Sawuk

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).