* [IPCOMP] Use per-cpu buffers
@ 2004-09-09 12:22 Herbert Xu
From: Herbert Xu @ 2004-09-09 12:22 UTC (permalink / raw)
To: kuznet, davem, jmorris, netdev
[-- Attachment #1: Type: text/plain, Size: 1052 bytes --]
Hi:
Here is a really ugly patch to get IPCOMP to use per-cpu buffers, but
I'm afraid it really is necessary: at 300K per SA, IPCOMP isn't very
affordable at all.
With per-cpu buffers this goes down to 300K per CPU.
I've also turned the kmalloc'ed scratch space into a vmalloc'ed one
since people may be loading the ipcomp module after the system has
been running for a while. On an i386 machine with 64M of RAM or less
this can often cause a 64K kmalloc to fail.
The crypto deflate buffer space is already vmalloc'ed as well.
Part of the ugliness comes from the lazy allocation. However, we need
the lazy initialisation since new IPCOMP algorithms may be introduced
in the future, which means we can't allocate space for every single
IPCOMP algorithm at module-load time.
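For reference, the per-packet fast path then looks roughly like this
(a simplified sketch of what the patch below does; ipcomp_decompress_sketch
is just an illustrative name and error handling is trimmed):

	static int ipcomp_decompress_sketch(struct ipcomp_data *ipcd,
					    u8 *start, int plen, int *dlen)
	{
		int cpu = get_cpu();
		/* Each CPU has its own 64K scratch buffer and its own tfm,
		 * shared by every SA using this algorithm. */
		u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
		struct crypto_tfm *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
		int err;

		*dlen = IPCOMP_SCRATCH_SIZE;
		err = crypto_comp_decompress(tfm, start, plen, scratch, dlen);
		put_cpu();
		return err;
	}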
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
[-- Attachment #2: ipcomp.patch --]
[-- Type: text/plain, Size: 13064 bytes --]
===== include/net/ipcomp.h 1.1 vs edited =====
--- 1.1/include/net/ipcomp.h 2003-05-17 07:02:27 +10:00
+++ edited/include/net/ipcomp.h 2004-09-09 21:28:02 +10:00
@@ -5,8 +5,7 @@
struct ipcomp_data {
u16 threshold;
- u8 *scratch;
- struct crypto_tfm *tfm;
+ struct crypto_tfm **tfms;
};
#endif
===== net/ipv4/ipcomp.c 1.33 vs edited =====
--- 1.33/net/ipv4/ipcomp.c 2004-08-25 20:48:12 +10:00
+++ edited/net/ipv4/ipcomp.c 2004-09-09 21:32:44 +10:00
@@ -16,25 +16,48 @@
#include <linux/config.h>
#include <linux/module.h>
#include <asm/scatterlist.h>
+#include <asm/semaphore.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ipcomp.h>
+struct ipcomp_tfms {
+ struct list_head list;
+ struct crypto_tfm **tfms;
+ int users;
+};
+
+static DECLARE_MUTEX(ipcomp_resource_sem);
+static void **ipcomp_scratches;
+static int ipcomp_scratch_users;
+static LIST_HEAD(ipcomp_tfms_list);
+
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
int err, plen, dlen;
struct iphdr *iph;
struct ipcomp_data *ipcd = x->data;
- u8 *start, *scratch = ipcd->scratch;
+ u8 *start, *scratch;
+ struct crypto_tfm *tfm;
+ int cpu;
plen = skb->len;
dlen = IPCOMP_SCRATCH_SIZE;
start = skb->data;
- err = crypto_comp_decompress(ipcd->tfm, start, plen, scratch, &dlen);
+ cpu = get_cpu();
+ scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+ tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+
+ err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
if (err)
goto out;
@@ -52,6 +75,7 @@
iph = skb->nh.iph;
iph->tot_len = htons(dlen + iph->ihl * 4);
out:
+ put_cpu();
return err;
}
@@ -97,14 +121,20 @@
int err, plen, dlen, ihlen;
struct iphdr *iph = skb->nh.iph;
struct ipcomp_data *ipcd = x->data;
- u8 *start, *scratch = ipcd->scratch;
+ u8 *start, *scratch;
+ struct crypto_tfm *tfm;
+ int cpu;
ihlen = iph->ihl * 4;
plen = skb->len - ihlen;
dlen = IPCOMP_SCRATCH_SIZE;
start = skb->data + ihlen;
- err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
+ cpu = get_cpu();
+ scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+ tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+
+ err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
if (err)
goto out;
@@ -114,9 +144,13 @@
}
memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
+ put_cpu();
+
pskb_trim(skb, ihlen + dlen + sizeof(struct ip_comp_hdr));
+ return 0;
out:
+ put_cpu();
return err;
}
@@ -260,12 +294,132 @@
return err;
}
+static void ipcomp_free_scratches(void)
+{
+ int i;
+ void **scratches;
+
+ if (--ipcomp_scratch_users)
+ return;
+
+ scratches = ipcomp_scratches;
+ if (!scratches)
+ return;
+
+ for_each_cpu(i) {
+ void *scratch = *per_cpu_ptr(scratches, i);
+ if (scratch)
+ vfree(scratch);
+ }
+
+ free_percpu(scratches);
+}
+
+static void **ipcomp_alloc_scratches(void)
+{
+ int i;
+ void **scratches;
+
+ if (ipcomp_scratch_users++)
+ return ipcomp_scratches;
+
+ scratches = alloc_percpu(void *);
+ if (!scratches)
+ return NULL;
+
+ ipcomp_scratches = scratches;
+
+ for_each_cpu(i) {
+ void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
+ if (!scratch)
+ return NULL;
+ *per_cpu_ptr(scratches, i) = scratch;
+ }
+
+ return scratches;
+}
+
+static void ipcomp_free_tfms(struct crypto_tfm **tfms)
+{
+ struct ipcomp_tfms *pos;
+ int cpu;
+
+ list_for_each_entry(pos, &ipcomp_tfms_list, list) {
+ if (pos->tfms == tfms)
+ break;
+ }
+
+ BUG_TRAP(pos);
+
+ if (--pos->users)
+ return;
+
+ list_del(&pos->list);
+ kfree(pos);
+
+ if (!tfms)
+ return;
+
+ for_each_cpu(cpu) {
+ struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
+ if (tfm)
+ crypto_free_tfm(tfm);
+ }
+ free_percpu(tfms);
+}
+
+static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
+{
+ struct ipcomp_tfms *pos;
+ struct crypto_tfm **tfms;
+ int cpu;
+
+ /* This can be any valid CPU ID so we don't need locking. */
+ cpu = smp_processor_id();
+
+ list_for_each_entry(pos, &ipcomp_tfms_list, list) {
+ struct crypto_tfm *tfm;
+
+ tfms = pos->tfms;
+ tfm = *per_cpu_ptr(tfms, cpu);
+
+ if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+ pos->users++;
+ return tfms;
+ }
+ }
+
+ pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+ if (!pos)
+ return NULL;
+
+ pos->users = 1;
+ INIT_LIST_HEAD(&pos->list);
+ list_add(&pos->list, &ipcomp_tfms_list);
+
+ pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+ if (!tfms)
+ goto error;
+
+ for_each_cpu(cpu) {
+ struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
+ if (!tfm)
+ goto error;
+ *per_cpu_ptr(tfms, cpu) = tfm;
+ }
+
+ return tfms;
+
+error:
+ ipcomp_free_tfms(tfms);
+ return NULL;
+}
+
static void ipcomp_free_data(struct ipcomp_data *ipcd)
{
- if (ipcd->tfm)
- crypto_free_tfm(ipcd->tfm);
- if (ipcd->scratch)
- kfree(ipcd->scratch);
+ if (ipcd->tfms)
+ ipcomp_free_tfms(ipcd->tfms);
+ ipcomp_free_scratches();
}
static void ipcomp_destroy(struct xfrm_state *x)
@@ -274,7 +428,9 @@
if (!ipcd)
return;
xfrm_state_delete_tunnel(x);
+ down(&ipcomp_resource_sem);
ipcomp_free_data(ipcd);
+ up(&ipcomp_resource_sem);
kfree(ipcd);
}
@@ -294,25 +450,26 @@
err = -ENOMEM;
ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL);
if (!ipcd)
- goto error;
+ goto out;
memset(ipcd, 0, sizeof(*ipcd));
x->props.header_len = 0;
if (x->props.mode)
x->props.header_len += sizeof(struct iphdr);
- ipcd->scratch = kmalloc(IPCOMP_SCRATCH_SIZE, GFP_KERNEL);
- if (!ipcd->scratch)
+ down(&ipcomp_resource_sem);
+ if (!ipcomp_alloc_scratches())
goto error;
-
- ipcd->tfm = crypto_alloc_tfm(x->calg->alg_name, 0);
- if (!ipcd->tfm)
+
+ ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
+ if (!ipcd->tfms)
goto error;
+ up(&ipcomp_resource_sem);
if (x->props.mode) {
err = ipcomp_tunnel_attach(x);
if (err)
- goto error;
+ goto error_tunnel;
}
calg_desc = xfrm_calg_get_byname(x->calg->alg_name);
@@ -323,11 +480,12 @@
out:
return err;
+error_tunnel:
+ down(&ipcomp_resource_sem);
error:
- if (ipcd) {
- ipcomp_free_data(ipcd);
- kfree(ipcd);
- }
+ ipcomp_free_data(ipcd);
+ up(&ipcomp_resource_sem);
+ kfree(ipcd);
goto out;
}
===== net/ipv6/ipcomp6.c 1.24 vs edited =====
--- 1.24/net/ipv6/ipcomp6.c 2004-08-25 20:48:12 +10:00
+++ edited/net/ipv6/ipcomp6.c 2004-09-09 21:36:23 +10:00
@@ -36,14 +36,31 @@
#include <net/xfrm.h>
#include <net/ipcomp.h>
#include <asm/scatterlist.h>
+#include <asm/semaphore.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/rtnetlink.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
+struct ipcomp6_tfms {
+ struct list_head list;
+ struct crypto_tfm **tfms;
+ int users;
+};
+
+static DECLARE_MUTEX(ipcomp6_resource_sem);
+static void **ipcomp6_scratches;
+static int ipcomp6_scratch_users;
+static LIST_HEAD(ipcomp6_tfms_list);
+
static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
int err = 0;
@@ -53,7 +70,9 @@
struct ipv6hdr *iph;
int plen, dlen;
struct ipcomp_data *ipcd = x->data;
- u8 *start, *scratch = ipcd->scratch;
+ u8 *start, *scratch;
+ struct crypto_tfm *tfm;
+ int cpu;
if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
skb_linearize(skb, GFP_ATOMIC) != 0) {
@@ -82,20 +101,24 @@
dlen = IPCOMP_SCRATCH_SIZE;
start = skb->data;
- err = crypto_comp_decompress(ipcd->tfm, start, plen, scratch, &dlen);
+ cpu = get_cpu();
+ scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
+ tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+
+ err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
if (err) {
err = -EINVAL;
- goto out;
+ goto out_put_cpu;
}
if (dlen < (plen + sizeof(struct ipv6_comp_hdr))) {
err = -EINVAL;
- goto out;
+ goto out_put_cpu;
}
err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
if (err) {
- goto out;
+ goto out_put_cpu;
}
skb_put(skb, dlen - plen);
@@ -104,6 +127,8 @@
iph = skb->nh.ipv6h;
iph->payload_len = htons(skb->len);
+out_put_cpu:
+ put_cpu();
out:
if (tmp_hdr)
kfree(tmp_hdr);
@@ -124,7 +149,9 @@
struct ipv6_comp_hdr *ipch;
struct ipcomp_data *ipcd = x->data;
int plen, dlen;
- u8 *start, *scratch = ipcd->scratch;
+ u8 *start, *scratch;
+ struct crypto_tfm *tfm;
+ int cpu;
hdr_len = skb->h.raw - skb->data;
@@ -144,14 +171,21 @@
dlen = IPCOMP_SCRATCH_SIZE;
start = skb->h.raw;
- err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
+ cpu = get_cpu();
+ scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
+ tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+
+ err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
if (err) {
+ put_cpu();
goto error;
}
if ((dlen + sizeof(struct ipv6_comp_hdr)) >= plen) {
+ put_cpu();
goto out_ok;
}
memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
+ put_cpu();
pskb_trim(skb, hdr_len + dlen + sizeof(struct ip_comp_hdr));
/* insert ipcomp header and replace datagram */
@@ -254,12 +288,132 @@
return err;
}
+static void ipcomp6_free_scratches(void)
+{
+ int i;
+ void **scratches;
+
+ if (--ipcomp6_scratch_users)
+ return;
+
+ scratches = ipcomp6_scratches;
+ if (!scratches)
+ return;
+
+ for_each_cpu(i) {
+ void *scratch = *per_cpu_ptr(scratches, i);
+ if (scratch)
+ vfree(scratch);
+ }
+
+ free_percpu(scratches);
+}
+
+static void **ipcomp6_alloc_scratches(void)
+{
+ int i;
+ void **scratches;
+
+ if (ipcomp6_scratch_users++)
+ return ipcomp6_scratches;
+
+ scratches = alloc_percpu(void *);
+ if (!scratches)
+ return NULL;
+
+ ipcomp6_scratches = scratches;
+
+ for_each_cpu(i) {
+ void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
+ if (!scratch)
+ return NULL;
+ *per_cpu_ptr(scratches, i) = scratch;
+ }
+
+ return scratches;
+}
+
+static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
+{
+ struct ipcomp6_tfms *pos;
+ int cpu;
+
+ list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
+ if (pos->tfms == tfms)
+ break;
+ }
+
+ BUG_TRAP(pos);
+
+ if (--pos->users)
+ return;
+
+ list_del(&pos->list);
+ kfree(pos);
+
+ if (!tfms)
+ return;
+
+ for_each_cpu(cpu) {
+ struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
+ if (tfm)
+ crypto_free_tfm(tfm);
+ }
+ free_percpu(tfms);
+}
+
+static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
+{
+ struct ipcomp6_tfms *pos;
+ struct crypto_tfm **tfms;
+ int cpu;
+
+ /* This can be any valid CPU ID so we don't need locking. */
+ cpu = smp_processor_id();
+
+ list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
+ struct crypto_tfm *tfm;
+
+ tfms = pos->tfms;
+ tfm = *per_cpu_ptr(tfms, cpu);
+
+ if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+ pos->users++;
+ return tfms;
+ }
+ }
+
+ pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+ if (!pos)
+ return NULL;
+
+ pos->users = 1;
+ INIT_LIST_HEAD(&pos->list);
+ list_add(&pos->list, &ipcomp6_tfms_list);
+
+ pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+ if (!tfms)
+ goto error;
+
+ for_each_cpu(cpu) {
+ struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
+ if (!tfm)
+ goto error;
+ *per_cpu_ptr(tfms, cpu) = tfm;
+ }
+
+ return tfms;
+
+error:
+ ipcomp6_free_tfms(tfms);
+ return NULL;
+}
+
static void ipcomp6_free_data(struct ipcomp_data *ipcd)
{
- if (ipcd->tfm)
- crypto_free_tfm(ipcd->tfm);
- if (ipcd->scratch)
- kfree(ipcd->scratch);
+ if (ipcd->tfms)
+ ipcomp6_free_tfms(ipcd->tfms);
+ ipcomp6_free_scratches();
}
static void ipcomp6_destroy(struct xfrm_state *x)
@@ -268,7 +422,9 @@
if (!ipcd)
return;
xfrm_state_delete_tunnel(x);
+ down(&ipcomp6_resource_sem);
ipcomp6_free_data(ipcd);
+ up(&ipcomp6_resource_sem);
kfree(ipcd);
xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
@@ -290,25 +446,26 @@
err = -ENOMEM;
ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL);
if (!ipcd)
- goto error;
+ goto out;
memset(ipcd, 0, sizeof(*ipcd));
x->props.header_len = 0;
if (x->props.mode)
x->props.header_len += sizeof(struct ipv6hdr);
- ipcd->scratch = kmalloc(IPCOMP_SCRATCH_SIZE, GFP_KERNEL);
- if (!ipcd->scratch)
+ down(&ipcomp6_resource_sem);
+ if (!ipcomp6_alloc_scratches())
goto error;
- ipcd->tfm = crypto_alloc_tfm(x->calg->alg_name, 0);
- if (!ipcd->tfm)
+ ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name);
+ if (!ipcd->tfms)
goto error;
+ up(&ipcomp6_resource_sem);
if (x->props.mode) {
err = ipcomp6_tunnel_attach(x);
if (err)
- goto error;
+ goto error_tunnel;
}
calg_desc = xfrm_calg_get_byname(x->calg->alg_name);
@@ -318,11 +475,12 @@
err = 0;
out:
return err;
+error_tunnel:
+ down(&ipcomp6_resource_sem);
error:
- if (ipcd) {
- ipcomp6_free_data(ipcd);
- kfree(ipcd);
- }
+ ipcomp6_free_data(ipcd);
+ up(&ipcomp6_resource_sem);
+ kfree(ipcd);
goto out;
}
* Re: [IPCOMP] Use per-cpu buffers
From: David S. Miller @ 2004-09-09 16:16 UTC (permalink / raw)
To: Herbert Xu; +Cc: kuznet, jmorris, netdev
On Thu, 9 Sep 2004 22:22:02 +1000
Herbert Xu <herbert@gondor.apana.org.au> wrote:
> With per-cpu buffers this goes down to 300K per CPU.
That amount of space just for decompression state is
ridiculous.
I seem to recall that the last time I looked at this, the reason
the tables are so huge is that the zlib we use in the kernel isn't
configurable; the table configuration is decided at compile time.
Yes, it's because of linux/zlib.h's cpp settings.
I guess it would be a lot of surgery to make this dynamic.
A second thought is that we may not be the only part
of the kernel interested in a per-cpu zlib scratch
buffer, no?
* Re: [IPCOMP] Use per-cpu buffers
From: Herbert Xu @ 2004-09-10 11:10 UTC (permalink / raw)
To: David S. Miller; +Cc: kuznet, jmorris, netdev, linux-ppp, paulus
On Thu, Sep 09, 2004 at 09:16:52AM -0700, David S. Miller wrote:
>
> > With per-cpu buffers this goes down to 300K per CPU.
>
> That amount of space just for decompression state is
> ridiculous.
Actually most of that space is for compression. The space for
decompression is only 32K if I read the zlib comment correctly.
However, due to the current crypto interface, you always have
to allocate both and you have to reserve a 64K buffer just in
case the packet is large.
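To put rough numbers on that (going by the zlib documentation, so treat
these as approximate; the kernel's compile-time settings in linux/zlib.h
may differ, as Dave said):

	deflate workspace: (1 << (windowBits + 2)) + (1 << (memLevel + 9))
	                 = 128K + 128K = 256K   (defaults 15/8)
	inflate workspace: (1 << windowBits) + a few K = ~39K
	IPCOMP scratch:    ~64K, now shared per CPU rather than per SA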
There's gotta be a better way though. What if the packet length
field was 32 bits? Surely we aren't going to allocate a 4G buffer :)
James, can you think of a general solution to the 64K buffer in
terms of the crypto decompression interface?
> A second thought is that we may not be the only part
> of the kernel interested in a per-cpu zlib scratch
> buffer, no?
There are two other users. JFFS2 is already using one global copy
accessed through a semaphore. Maybe we should move the IPCOMP
processing into process context as well since it's so slow.
PPP is the other user and allocates one for each device that requests
deflate compression.
The problem isn't as bad for PPP as it is for IPsec because, firstly,
PPP is less scalable anyway due to the device/process overhead. More
importantly, if compression allocation fails, PPP can still carry on.
Unfortunately for IPsec, the compression allocation is done after the
negotiation so there is no room for recovery (you'll have to redo the
negotiation).
On a totally orthogonal topic, has anybody thought of doing a PPP
daemon like the IPsec daemons? That is, one process that manages
all PPP sessions. This could be useful for large L2TP servers and
the like.
Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
* Re: [IPCOMP] Use per-cpu buffers
From: James Morris @ 2004-09-10 15:42 UTC (permalink / raw)
To: Herbert Xu; +Cc: David S. Miller, kuznet, netdev, linux-ppp, paulus
On Fri, 10 Sep 2004, Herbert Xu wrote:
> James, can you think of a general solution to the 64K buffer in
> terms of the crypto decompression interface?
I haven't looked at the code for a while, but I think we might be able to
save some memory by specifying compression parameters and making the
appropriate changes to zlib.
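Something along these lines, say (an untested sketch against the
userspace zlib API just to show the knobs; the kernel wrapper would need
the equivalent parameters exposed, and small_deflate_init is only an
illustrative name):

	#include <zlib.h>

	/* Shrinking windowBits and memLevel cuts the deflate workspace from
	 * roughly 256K (15/8) down to roughly 32K (12/5), trading away some
	 * compression ratio.  Negative windowBits selects raw deflate, which
	 * is what IPComp (RFC 2394) carries on the wire. */
	static int small_deflate_init(z_stream *strm)
	{
		strm->zalloc = Z_NULL;
		strm->zfree  = Z_NULL;
		strm->opaque = Z_NULL;
		return deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
				    -12 /* windowBits: raw deflate, 4K window */,
				    5   /* memLevel: smaller hash tables */,
				    Z_DEFAULT_STRATEGY);
	}

The inflate side probably still needs the full 32K window to stay
interoperable with peers compressing at the defaults, so most of the
saving is on the deflate side.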
- James
--
James Morris
<jmorris@redhat.com>
* Re: [IPCOMP] Use per-cpu buffers
From: David S. Miller @ 2004-09-10 21:55 UTC (permalink / raw)
To: James Morris; +Cc: herbert, kuznet, netdev, linux-ppp, paulus
On Fri, 10 Sep 2004 11:42:00 -0400 (EDT)
James Morris <jmorris@redhat.com> wrote:
> On Fri, 10 Sep 2004, Herbert Xu wrote:
>
> > James, can you think of a general solution to the 64K buffer in
> > terms of the crypto decompression interface?
>
> I haven't looked at the code for a while, but I think we might be able to
> save some memory by specifying compression parameters and making the
> appropriate changes to zlib.
For now I'm going to put in Herbert's per-cpu patch, and we
can try to come up with something better later.
* Re: [IPCOMP] Use per-cpu buffers
From: Herbert Xu @ 2004-09-10 22:33 UTC (permalink / raw)
To: James Morris; +Cc: David S. Miller, kuznet, netdev, linux-ppp, paulus
On Fri, Sep 10, 2004 at 11:42:00AM -0400, James Morris wrote:
>
> I haven't looked at the code for a while, but I think we might be able to
> save some memory by specifying compression parameters and making the
> appropriate changes to zlib.
It'd also be nice to add a compression algorithm that requires little
memory to run. That way we can get a clear picture of what interface
changes are OK and what aren't.
Could we perhaps use v44? Is the patent an issue?
Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
* RE: single process pppd for all PPP sessions? [Was [IPCOMP] Use per-cpu buffers]
From: J Chapman @ 2004-09-13 11:18 UTC (permalink / raw)
To: 'Herbert Xu', 'David S. Miller'
Cc: kuznet, jmorris, netdev, linux-ppp, paulus
Re: anyone think of doing a single process PPP daemon managing all
PPP sessions?
Yes, although nothing has started yet. Still working on OpenL2TP...
http://openl2tp.sf.net/
-James
> -----Original Message-----
> From: Herbert Xu [mailto:herbert@gondor.apana.org.au]
> Sent: 10 September 2004 12:11
> To: David S. Miller
> Cc: kuznet@ms2.inr.ac.ru; jmorris@redhat.com; netdev@oss.sgi.com;
> linux-ppp@vger.kernel.org; paulus@samba.org
> Subject: Re: [IPCOMP] Use per-cpu buffers
>
[snip]
> On a totally orthogonal topic, has any body thought of doing a PPP
> daemon like the IPsec daemons? That is, have one process that manages
> all PPP sessions. This could be useful for large L2TP servers and
> alike.
>