* [PATCH 1/3] crypto: acomp - Move scomp stream allocation code into acomp
2025-03-19 6:04 [PATCH 0/3] crypto: Add SG support to deflate Herbert Xu
@ 2025-03-19 6:04 ` Herbert Xu
2025-03-19 6:04 ` [PATCH 2/3] crypto: acomp - Add acomp_walk Herbert Xu
` (2 subsequent siblings)
3 siblings, 0 replies; 9+ messages in thread
From: Herbert Xu @ 2025-03-19 6:04 UTC (permalink / raw)
To: Linux Crypto Mailing List; +Cc: Ard Biesheuvel
Move the dynamic stream allocation code into acomp and make it
available as a helper for acomp algorithms.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
crypto/acompress.c | 112 +++++++++++++++++++++++
crypto/scompress.c | 133 +++-------------------------
include/crypto/internal/acompress.h | 33 +++++++
include/crypto/internal/scompress.h | 28 ++----
4 files changed, 166 insertions(+), 140 deletions(-)
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 6ef335f5bf27..75fa9be1aa41 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -9,13 +9,18 @@
#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
+#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/page-flags.h>
+#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
@@ -445,5 +450,112 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
+static void acomp_stream_workfn(struct work_struct *work)
+{
+ struct crypto_acomp_streams *s =
+ container_of(work, struct crypto_acomp_streams, stream_work);
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu;
+
+ for_each_cpu(cpu, &s->stream_want) {
+ struct crypto_acomp_stream *ps;
+ void *ctx;
+
+ ps = per_cpu_ptr(streams, cpu);
+ if (ps->ctx)
+ continue;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx))
+ break;
+
+ spin_lock_bh(&ps->lock);
+ ps->ctx = ctx;
+ spin_unlock_bh(&ps->lock);
+
+ cpumask_clear_cpu(cpu, &s->stream_want);
+ }
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ void (*free_ctx)(void *);
+ int i;
+
+ cancel_work_sync(&s->stream_work);
+ free_ctx = s->free_ctx;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+ if (!ps->ctx)
+ continue;
+
+ free_ctx(ps->ctx);
+ }
+
+ free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams;
+ struct crypto_acomp_stream *ps;
+ unsigned int i;
+ void *ctx;
+
+ if (s->streams)
+ return 0;
+
+ streams = alloc_percpu(struct crypto_acomp_stream);
+ if (!streams)
+ return -ENOMEM;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx)) {
+ free_percpu(streams);
+ return PTR_ERR(ctx);
+ }
+
+ i = cpumask_first(cpu_possible_mask);
+ ps = per_cpu_ptr(streams, i);
+ ps->ctx = ctx;
+
+ for_each_possible_cpu(i) {
+ ps = per_cpu_ptr(streams, i);
+ spin_lock_init(&ps->lock);
+ }
+
+ s->streams = streams;
+
+ INIT_WORK(&s->stream_work, acomp_stream_workfn);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu = raw_smp_processor_id();
+ struct crypto_acomp_stream *ps;
+
+ ps = per_cpu_ptr(streams, cpu);
+ spin_lock_bh(&ps->lock);
+ if (likely(ps->ctx))
+ return ps;
+ spin_unlock(&ps->lock);
+
+ cpumask_set_cpu(cpu, &s->stream_want);
+ schedule_work(&s->stream_work);
+
+ ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+ spin_lock(&ps->lock);
+ return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 52b157423ae9..ebcc15be4f41 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -7,7 +7,6 @@
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cpumask.h>
@@ -132,91 +131,15 @@ static int crypto_scomp_alloc_scratches(void)
return scomp_alloc_scratch(scratch, i);
}
-static void scomp_free_streams(struct scomp_alg *alg)
-{
- struct crypto_acomp_stream __percpu *stream = alg->stream;
- int i;
-
- for_each_possible_cpu(i) {
- struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
-
- if (!ps->ctx)
- continue;
-
- alg->free_ctx(ps->ctx);
- }
-
- free_percpu(stream);
-}
-
-static int scomp_alloc_streams(struct scomp_alg *alg)
-{
- struct crypto_acomp_stream __percpu *stream;
- struct crypto_acomp_stream *ps;
- unsigned int i;
- void *ctx;
-
- stream = alloc_percpu(struct crypto_acomp_stream);
- if (!stream)
- return -ENOMEM;
-
- ctx = alg->alloc_ctx();
- if (IS_ERR(ctx)) {
- free_percpu(stream);
- return PTR_ERR(ctx);
- }
-
- i = cpumask_first(cpu_possible_mask);
- ps = per_cpu_ptr(stream, i);
- ps->ctx = ctx;
-
- for_each_possible_cpu(i) {
- ps = per_cpu_ptr(stream, i);
- spin_lock_init(&ps->lock);
- }
-
- alg->stream = stream;
- return 0;
-}
-
-static void scomp_stream_workfn(struct work_struct *work)
-{
- struct scomp_alg *alg = container_of(work, struct scomp_alg,
- stream_work);
- struct crypto_acomp_stream __percpu *stream = alg->stream;
- int cpu;
-
- for_each_cpu(cpu, &alg->stream_want) {
- struct crypto_acomp_stream *ps;
- void *ctx;
-
- ps = per_cpu_ptr(stream, cpu);
- if (ps->ctx)
- continue;
-
- ctx = alg->alloc_ctx();
- if (IS_ERR(ctx))
- break;
-
- spin_lock_bh(&ps->lock);
- ps->ctx = ctx;
- spin_unlock_bh(&ps->lock);
-
- cpumask_clear_cpu(cpu, &alg->stream_want);
- }
-}
-
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
int ret = 0;
mutex_lock(&scomp_lock);
- if (!alg->stream) {
- ret = scomp_alloc_streams(alg);
- if (ret)
- goto unlock;
- }
+ ret = crypto_acomp_alloc_streams(&alg->streams);
+ if (ret)
+ goto unlock;
if (!scomp_scratch_users) {
ret = crypto_scomp_alloc_scratches();
if (ret)
@@ -229,13 +152,13 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
return ret;
}
-static struct scomp_scratch *scomp_lock_scratch_bh(void) __acquires(scratch)
+static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
{
int cpu = raw_smp_processor_id();
struct scomp_scratch *scratch;
scratch = per_cpu_ptr(&scomp_scratch, cpu);
- spin_lock_bh(&scratch->lock);
+ spin_lock(&scratch->lock);
if (likely(scratch->src))
return scratch;
spin_unlock(&scratch->lock);
@@ -248,39 +171,10 @@ static struct scomp_scratch *scomp_lock_scratch_bh(void) __acquires(scratch)
return scratch;
}
-static inline void scomp_unlock_scratch_bh(struct scomp_scratch *scratch)
+static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
__releases(scratch)
{
- spin_unlock_bh(&scratch->lock);
-}
-
-static struct crypto_acomp_stream *scomp_lock_stream(struct crypto_scomp *tfm)
- __acquires(stream)
-{
- struct scomp_alg *alg = crypto_scomp_alg(tfm);
- struct crypto_acomp_stream __percpu *stream;
- int cpu = raw_smp_processor_id();
- struct crypto_acomp_stream *ps;
-
- stream = alg->stream;
- ps = per_cpu_ptr(stream, cpu);
- spin_lock(&ps->lock);
- if (likely(ps->ctx))
- return ps;
- spin_unlock(&ps->lock);
-
- cpumask_set_cpu(cpu, &alg->stream_want);
- schedule_work(&alg->stream_work);
-
- ps = per_cpu_ptr(stream, cpumask_first(cpu_possible_mask));
- spin_lock(&ps->lock);
- return ps;
-}
-
-static inline void scomp_unlock_stream(struct crypto_acomp_stream *stream)
- __releases(stream)
-{
- spin_unlock(&stream->lock);
+ spin_unlock(&scratch->lock);
}
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
@@ -306,7 +200,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (!req->dst || !dlen)
return -EINVAL;
- scratch = scomp_lock_scratch_bh();
+ stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
+ scratch = scomp_lock_scratch();
if (acomp_request_src_isvirt(req))
src = req->svirt;
@@ -367,7 +262,6 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
dlen = min(dlen, max);
}
- stream = scomp_lock_stream(scomp);
if (dir)
ret = crypto_scomp_compress(scomp, src, slen,
dst, &dlen, stream->ctx);
@@ -378,8 +272,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (dst == scratch->dst)
memcpy_to_sglist(req->dst, 0, dst, dlen);
- scomp_unlock_stream(stream);
- scomp_unlock_scratch_bh(scratch);
+ scomp_unlock_scratch(scratch);
+ crypto_acomp_unlock_stream_bh(stream);
req->dlen = dlen;
@@ -466,8 +360,7 @@ static void crypto_scomp_destroy(struct crypto_alg *alg)
{
struct scomp_alg *scomp = __crypto_scomp_alg(alg);
- cancel_work_sync(&scomp->stream_work);
- scomp_free_streams(scomp);
+ crypto_acomp_free_streams(&scomp->streams);
}
static const struct crypto_type crypto_scomp_type = {
@@ -493,8 +386,6 @@ static void scomp_prepare_alg(struct scomp_alg *alg)
comp_prepare_alg(&alg->calg);
base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
-
- INIT_WORK(&alg->stream_work, scomp_stream_workfn);
}
int crypto_register_scomp(struct scomp_alg *alg)
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 2af690819a83..ee5eff19eaf4 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,6 +11,10 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
#define ACOMP_REQUEST_ON_STACK(name, tfm) \
char __##name##_req[sizeof(struct acomp_req) + \
@@ -53,6 +57,24 @@ struct acomp_alg {
};
};
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ union {
+ void (*free_ctx)(void *);
+ void (*cfree_ctx)(const void *);
+ };
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
/*
* Transform internal helpers.
*/
@@ -157,4 +179,15 @@ static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
return crypto_tfm_req_chain(&tfm->base);
}
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
#endif
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index fd74e656ffd2..533d6c16a491 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,22 +9,12 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <crypto/acompress.h>
-#include <crypto/algapi.h>
-#include <linux/cpumask_types.h>
-#include <linux/workqueue_types.h>
-
-struct acomp_req;
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
};
-struct crypto_acomp_stream {
- spinlock_t lock;
- void *ctx;
-};
-
/**
* struct scomp_alg - synchronous compression algorithm
*
@@ -33,14 +23,10 @@ struct crypto_acomp_stream {
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
- * @stream_work: Work struct to allocate stream memmory
- * @stream_want: CPU mask for allocating stream memory
+ * @streams: Per-cpu memory for algorithm
* @calg: Cmonn algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(void);
- void (*free_ctx)(void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
@@ -48,9 +34,13 @@ struct scomp_alg {
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
- struct crypto_acomp_stream __percpu *stream;
- struct work_struct stream_work;
- cpumask_t stream_want;
+ union {
+ struct {
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *ctx);
+ };
+ struct crypto_acomp_streams streams;
+ };
union {
struct COMP_ALG_COMMON;
--
2.39.5
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH 2/3] crypto: acomp - Add acomp_walk
2025-03-19 6:04 [PATCH 0/3] crypto: Add SG support to deflate Herbert Xu
2025-03-19 6:04 ` [PATCH 1/3] crypto: acomp - Move scomp stream allocation code into acomp Herbert Xu
@ 2025-03-19 6:04 ` Herbert Xu
2025-03-19 6:04 ` [PATCH 3/3] crypto: deflate - Convert to acomp Herbert Xu
2025-03-20 7:51 ` [PATCH 0/3] crypto: Add SG support to deflate Ard Biesheuvel
3 siblings, 0 replies; 9+ messages in thread
From: Herbert Xu @ 2025-03-19 6:04 UTC (permalink / raw)
To: Linux Crypto Mailing List; +Cc: Ard Biesheuvel
Add acomp_walk which is similar to skcipher_walk but tailored for
acomp.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
crypto/acompress.c | 116 ++++++++++++++++++++++++++++
include/crypto/internal/acompress.h | 44 +++++++++++
2 files changed, 160 insertions(+)
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 75fa9be1aa41..ac64e78f3b08 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -8,6 +8,7 @@
*/
#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
@@ -15,6 +16,8 @@
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -27,6 +30,14 @@
struct crypto_scomp;
+enum {
+ ACOMP_WALK_SLEEP = 1 << 0,
+ ACOMP_WALK_SRC_LINEAR = 1 << 1,
+ ACOMP_WALK_SRC_FOLIO = 1 << 2,
+ ACOMP_WALK_DST_LINEAR = 1 << 3,
+ ACOMP_WALK_DST_FOLIO = 1 << 4,
+};
+
static const struct crypto_type crypto_acomp_type;
static void acomp_reqchain_done(void *data, int err);
@@ -557,5 +568,110 @@ struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
}
EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+ walk->slen -= used;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+ scatterwalk_advance(&walk->in, used);
+ else
+ scatterwalk_done_src(&walk->in, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+ walk->dlen -= used;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+ scatterwalk_advance(&walk->out, used);
+ else
+ scatterwalk_done_dst(&walk->out, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+ unsigned int slen = walk->slen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+ walk->in.offset);
+ return min(slen, max);
+ }
+
+ return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+ unsigned int dlen = walk->dlen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+ walk->out.offset);
+ return min(dlen, max);
+ }
+
+ return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req)
+{
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ walk->slen = req->slen;
+ walk->dlen = req->dlen;
+
+ if (!walk->slen || !walk->dlen)
+ return -EINVAL;
+
+ walk->flags = 0;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ walk->flags |= ACOMP_WALK_SLEEP;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+ walk->flags |= ACOMP_WALK_SRC_LINEAR;
+ else if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)) {
+ src = &req->chain.ssg;
+ sg_init_table(src, 1);
+ sg_set_folio(src, req->sfolio, walk->slen, req->soff);
+ }
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+ walk->flags |= ACOMP_WALK_DST_LINEAR;
+ else if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO)) {
+ dst = &req->chain.dsg;
+ sg_init_table(dst, 1);
+ sg_set_folio(dst, req->dfolio, walk->dlen, req->doff);
+ }
+
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.sg = (void *)req->svirt;
+ walk->in.offset = 0;
+ } else
+ scatterwalk_start(&walk->in, src);
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.sg = (void *)req->dvirt;
+ walk->out.offset = 0;
+ } else
+ scatterwalk_start(&walk->out, dst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index ee5eff19eaf4..fbbff9a8a2d9 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,6 +11,7 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
#include <linux/compiler_types.h>
#include <linux/cpumask_types.h>
#include <linux/spinlock.h>
@@ -75,6 +76,37 @@ struct crypto_acomp_streams {
cpumask_t stream_want;
};
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
+
/*
* Transform internal helpers.
*/
@@ -190,4 +222,16 @@ static inline void crypto_acomp_unlock_stream_bh(
{
spin_unlock_bh(&stream->lock);
}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
#endif
--
2.39.5
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH 3/3] crypto: deflate - Convert to acomp
2025-03-19 6:04 [PATCH 0/3] crypto: Add SG support to deflate Herbert Xu
2025-03-19 6:04 ` [PATCH 1/3] crypto: acomp - Move scomp stream allocation code into acomp Herbert Xu
2025-03-19 6:04 ` [PATCH 2/3] crypto: acomp - Add acomp_walk Herbert Xu
@ 2025-03-19 6:04 ` Herbert Xu
2025-03-20 7:51 ` [PATCH 0/3] crypto: Add SG support to deflate Ard Biesheuvel
3 siblings, 0 replies; 9+ messages in thread
From: Herbert Xu @ 2025-03-19 6:04 UTC (permalink / raw)
To: Linux Crypto Mailing List; +Cc: Ard Biesheuvel
This is based on work by Ard Biesheuvel <ardb@kernel.org>.
Convert deflate from scomp to acomp. This removes the need for
the caller to linearise the source and destination.
Link: https://lore.kernel.org/all/20230718125847.3869700-21-ardb@kernel.org/
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
crypto/deflate.c | 405 ++++++++++++++++++++++++-----------------------
1 file changed, 208 insertions(+), 197 deletions(-)
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 5c346c544093..bc76c343a0cf 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -6,246 +6,255 @@
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- *
- * The default winbits of 11 should suit most packets, and it may be something
- * to configure on a per-tfm basis in the future.
- *
- * Currently, compression history is not maintained between tfm calls, as
- * it is not needed for IPCOMP and keeps the code simpler. It can be
- * implemented if someone wants it.
+ * Copyright (c) 2023 Google, LLC. <ardb@kernel.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
*/
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/crypto.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
-struct deflate_ctx {
- struct z_stream_s comp_stream;
- struct z_stream_s decomp_stream;
+struct deflate_stream {
+ struct z_stream_s stream;
+ u8 workspace[];
};
-static int deflate_comp_init(struct deflate_ctx *ctx)
+static DEFINE_MUTEX(deflate_stream_lock);
+
+static void *deflate_alloc_stream(void)
{
- int ret = 0;
- struct z_stream_s *stream = &ctx->comp_stream;
+ size_t size = max(zlib_inflate_workspacesize(),
+ zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
+ DEFLATE_DEF_MEMLEVEL));
+ struct deflate_stream *ctx;
- stream->workspace = vzalloc(zlib_deflate_workspacesize(
- -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL));
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
- Z_DEFAULT_STRATEGY);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
-
-static int deflate_decomp_init(struct deflate_ctx *ctx)
-{
- int ret = 0;
- struct z_stream_s *stream = &ctx->decomp_stream;
-
- stream->workspace = vzalloc(zlib_inflate_workspacesize());
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
-
-static void deflate_comp_exit(struct deflate_ctx *ctx)
-{
- zlib_deflateEnd(&ctx->comp_stream);
- vfree(ctx->comp_stream.workspace);
-}
-
-static void deflate_decomp_exit(struct deflate_ctx *ctx)
-{
- zlib_inflateEnd(&ctx->decomp_stream);
- vfree(ctx->decomp_stream.workspace);
-}
-
-static int __deflate_init(void *ctx)
-{
- int ret;
-
- ret = deflate_comp_init(ctx);
- if (ret)
- goto out;
- ret = deflate_decomp_init(ctx);
- if (ret)
- deflate_comp_exit(ctx);
-out:
- return ret;
-}
-
-static void *deflate_alloc_ctx(void)
-{
- struct deflate_ctx *ctx;
- int ret;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
- ret = __deflate_init(ctx);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
+ ctx->stream.workspace = ctx->workspace;
return ctx;
}
-static void __deflate_exit(void *ctx)
+static struct crypto_acomp_streams deflate_streams = {
+ .alloc_ctx = deflate_alloc_stream,
+ .cfree_ctx = kvfree,
+};
+
+static int deflate_compress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
- deflate_comp_exit(ctx);
- deflate_decomp_exit(ctx);
+ struct z_stream_s *stream = &ds->stream;
+ struct acomp_walk walk;
+ int ret;
+
+ ret = acomp_walk_virt(&walk, req);
+ if (ret)
+ return ret;
+
+ do {
+ unsigned int dcur;
+
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur)
+ return -ENOSPC;
+
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
+
+ do {
+ int flush = Z_FINISH;
+ unsigned int scur;
+
+ stream->avail_in = 0;
+ stream->next_in = NULL;
+
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ if (acomp_walk_more_src(&walk, scur))
+ flush = Z_NO_FLUSH;
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ ret = zlib_deflate(stream, flush);
+
+ if (scur) {
+ scur -= stream->avail_in;
+ acomp_walk_done_src(&walk, scur);
+ }
+ } while (ret == Z_OK && stream->avail_out);
+
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static void deflate_free_ctx(void *ctx)
+static int deflate_compress(struct acomp_req *req)
{
- __deflate_exit(ctx);
- kfree_sensitive(ctx);
-}
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ struct acomp_req *r2;
+ int err;
-static int __deflate_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->comp_stream;
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_deflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
+ err = deflate_compress_one(req, ds);
+ req->base.err = err;
- ret = zlib_deflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
+ list_for_each_entry(r2, &req->base.list, base.list) {
+ zlib_deflateReset(&ds->stream);
+ r2->base.err = deflate_compress_one(r2, ds);
}
- ret = 0;
- *dlen = stream->total_out;
+
out:
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
+}
+
+static int deflate_decompress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
+{
+ struct z_stream_s *stream = &ds->stream;
+ bool out_of_space = false;
+ struct acomp_walk walk;
+ int ret;
+
+ ret = acomp_walk_virt(&walk, req);
+ if (ret)
+ return ret;
+
+ do {
+ unsigned int scur;
+
+ stream->avail_in = 0;
+ stream->next_in = NULL;
+
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ do {
+ unsigned int dcur;
+
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ out_of_space = true;
+ break;
+ }
+
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
+
+ ret = zlib_inflate(stream, Z_NO_FLUSH);
+
+ dcur -= stream->avail_out;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK && stream->avail_in);
+
+ if (scur)
+ acomp_walk_done_src(&walk, scur);
+
+ if (out_of_space)
+ return -ENOSPC;
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
+}
+
+static int deflate_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ struct acomp_req *r2;
+ int err;
+
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
+
+ err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS);
+ if (err != Z_OK) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = deflate_decompress_one(req, ds);
+ req->base.err = err;
+
+ list_for_each_entry(r2, &req->base.list, base.list) {
+ zlib_inflateReset(&ds->stream);
+ r2->base.err = deflate_decompress_one(r2, ds);
+ }
+
+out:
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
+}
+
+static int deflate_init(struct crypto_acomp *tfm)
+{
+ int ret;
+
+ mutex_lock(&deflate_stream_lock);
+ ret = crypto_acomp_alloc_streams(&deflate_streams);
+ mutex_unlock(&deflate_stream_lock);
+
return ret;
}
-static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __deflate_compress(src, slen, dst, dlen, ctx);
-}
-
-static int __deflate_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
-
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->decomp_stream;
-
- ret = zlib_inflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out;
- }
-
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
-
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- /*
- * Work around a bug in zlib, which sometimes wants to taste an extra
- * byte when being used in the (undocumented) raw deflate mode.
- * (From USAGI).
- */
- if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
- u8 zerostuff = 0;
- stream->next_in = &zerostuff;
- stream->avail_in = 1;
- ret = zlib_inflate(stream, Z_FINISH);
- }
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
-out:
- return ret;
-}
-
-static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __deflate_decompress(src, slen, dst, dlen, ctx);
-}
-
-static struct scomp_alg scomp = {
- .alloc_ctx = deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp",
- .cra_module = THIS_MODULE,
- }
+static struct acomp_alg acomp = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .init = deflate_init,
+ .base.cra_name = "deflate",
+ .base.cra_driver_name = "deflate-generic",
+ .base.cra_flags = CRYPTO_ALG_REQ_CHAIN,
+ .base.cra_module = THIS_MODULE,
};
static int __init deflate_mod_init(void)
{
- return crypto_register_scomp(&scomp);
+ return crypto_register_acomp(&acomp);
}
static void __exit deflate_mod_fini(void)
{
- crypto_unregister_scomp(&scomp);
+ crypto_unregister_acomp(&acomp);
+ crypto_acomp_free_streams(&deflate_streams);
}
subsys_initcall(deflate_mod_init);
@@ -254,5 +263,7 @@ module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>");
MODULE_ALIAS_CRYPTO("deflate");
MODULE_ALIAS_CRYPTO("deflate-generic");
--
2.39.5
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH 0/3] crypto: Add SG support to deflate
2025-03-19 6:04 [PATCH 0/3] crypto: Add SG support to deflate Herbert Xu
` (2 preceding siblings ...)
2025-03-19 6:04 ` [PATCH 3/3] crypto: deflate - Convert to acomp Herbert Xu
@ 2025-03-20 7:51 ` Ard Biesheuvel
2025-03-20 8:14 ` Herbert Xu
3 siblings, 1 reply; 9+ messages in thread
From: Ard Biesheuvel @ 2025-03-20 7:51 UTC (permalink / raw)
To: Herbert Xu, Eric Biggers; +Cc: Linux Crypto Mailing List
On Wed, 19 Mar 2025 at 07:05, Herbert Xu <herbert@gondor.apana.org.au> wrote:
>
> This patch-series adds SG support to deflate so that IPsec can
> avoid linearising the data.
>
> Herbert Xu (3):
> crypto: acomp - Move scomp stream allocation code into acomp
> crypto: acomp - Add acomp_walk
> crypto: deflate - Convert to acomp
>
IIRC Eric had some feedback at the time regarding the exact behavior
of the zlib API, and I notice that the code no longer deals with
Z_SYNC_FLUSH at all, which I did handle in my version of patch #3.
Do your tests have coverage for all the conditional cases there?
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH 0/3] crypto: Add SG support to deflate
2025-03-20 7:51 ` [PATCH 0/3] crypto: Add SG support to deflate Ard Biesheuvel
@ 2025-03-20 8:14 ` Herbert Xu
2025-03-20 8:55 ` Ard Biesheuvel
0 siblings, 1 reply; 9+ messages in thread
From: Herbert Xu @ 2025-03-20 8:14 UTC (permalink / raw)
To: Ard Biesheuvel
Cc: Eric Biggers, Linux Crypto Mailing List, Sergey Senozhatsky,
YOSHIFUJI Hideaki/吉藤英明
On Thu, Mar 20, 2025 at 08:51:40AM +0100, Ard Biesheuvel wrote:
>
> IIRC Eric had some feedback at the time regarding the exact behavior
> of the zlib API, and I notice that the code no longer deals with
> Z_SYNC_FLUSH at all, which I did handle in my version of patch #3.
I didn't see any feedback regarding this when looking at your patch:
https://lore.kernel.org/linux-crypto/20230718125847.3869700-21-ardb@kernel.org/
Do you have a link to that discussion?
I was going to add the original USAGI workaround but then I
thought perhaps it is no longer necessary as our zlib has
been updated since the workaround was added back in 2003.
My understanding is that the workaround is not about Z_SYNC_FLUSH
but feeding an extra byte to the decompressor. The only difference
between Z_SYNC_FLUSH and Z_FINISH on inflate is that one would return
Z_OK while the other returns Z_BUF_ERROR; both are treated as an
error by crypto/deflate.c.
> Do your tests have coverage for all the conditional cases there?
If you mean the scatterlists then yes I have coverage for that.
If you mean the USAGI workaround then no because I don't know what
triggered the original problem.
I do note however that zcomp which also contains deflate does not
have this workaround either. If it was really necessary then zram
would have run into it and screamed loudly about not being able to
decompress a page. Or perhaps nobody ever uses zram with deflate.
Cheers,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 0/3] crypto: Add SG support to deflate
2025-03-20 8:14 ` Herbert Xu
@ 2025-03-20 8:55 ` Ard Biesheuvel
2025-03-20 17:34 ` Eric Biggers
0 siblings, 1 reply; 9+ messages in thread
From: Ard Biesheuvel @ 2025-03-20 8:55 UTC (permalink / raw)
To: Herbert Xu
Cc: Eric Biggers, Linux Crypto Mailing List, Sergey Senozhatsky,
YOSHIFUJI Hideaki/吉藤英明
On Thu, 20 Mar 2025 at 09:14, Herbert Xu <herbert@gondor.apana.org.au> wrote:
>
> On Thu, Mar 20, 2025 at 08:51:40AM +0100, Ard Biesheuvel wrote:
> >
> > IIRC Eric had some feedback at the time regarding the exact behavior
> > of the zlib API, and I notice that the code no longer deals with
> > Z_SYNC_FLUSH at all, which I did handle in my version of patch #3.
>
> I didn't see any feedback regarding this when looking at your patch:
>
> https://lore.kernel.org/linux-crypto/20230718125847.3869700-21-ardb@kernel.org/
>
> Do you have a link to that discussion?
>
No. I did some digging but I couldn't find anything. Eric might remember.
> I was going to add the original USAGI workaround but then I
> thought perhaps it is no longer necessary as our zlib has
> been updated since the workaround was added back in 2003.
>
> My understanding is that the workaround is not about Z_SYNC_FLUSH
> but feeding an extra byte to the decompressor. The only difference
> between Z_SYNC_FLUSH and Z_FINISH on inflate is that one would return
> Z_OK while the other returns Z_BUF_ERROR; both are treated as an
> error by crypto/deflate.c.
>
I'm fine with this, I just wanted to raise it because it jogged my
memory but I can't quite remember the details. So if things are
working as expected, it's all fine with me.
> > Do your tests have coverage for all the conditional cases there?
>
> If you mean the scatterlists then yes I have coverage for that.
>
> If you mean the USAGI workaround then no because I don't know what
> triggered the original problem.
>
> I do note however that zcomp which also contains deflate does not
> have this workaround either. If it was really necessary then zram
> would have run into it and screamed loudly about not being able to
> decompress a page. Or perhaps nobody ever uses zram with deflate.
>
Yeah I meant in general, not the workaround for the mythical USAGI issue :-)
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 0/3] crypto: Add SG support to deflate
2025-03-20 8:55 ` Ard Biesheuvel
@ 2025-03-20 17:34 ` Eric Biggers
2025-03-21 2:36 ` Herbert Xu
0 siblings, 1 reply; 9+ messages in thread
From: Eric Biggers @ 2025-03-20 17:34 UTC (permalink / raw)
To: Ard Biesheuvel
Cc: Herbert Xu, Linux Crypto Mailing List, Sergey Senozhatsky,
YOSHIFUJI Hideaki/吉藤英明
On Thu, Mar 20, 2025 at 09:55:42AM +0100, Ard Biesheuvel wrote:
> On Thu, 20 Mar 2025 at 09:14, Herbert Xu <herbert@gondor.apana.org.au> wrote:
> >
> > On Thu, Mar 20, 2025 at 08:51:40AM +0100, Ard Biesheuvel wrote:
> > >
> > > IIRC Eric had some feedback at the time regarding the exact behavior
> > > of the zlib API, and I notice that the code no longer deals with
> > > Z_SYNC_FLUSH at all, which I did handle in my version of patch #3.
> >
> > I didn't see any feedback regarding this when looking at your patch:
> >
> > https://lore.kernel.org/linux-crypto/20230718125847.3869700-21-ardb@kernel.org/
> >
> > Do you have a link to that discussion?
> >
>
> No. I did some digging but I couldn't find anything. Eric might remember.
I'm not sure what this is referring to.
Then again this patchset doesn't apply, so it's unreviewable anyway.
Just a note, for compression and decompression it's often more efficient to
linearize in the caller. Otherwise the algorithm ends up having to copy the
uncompressed data to an internal buffer anyway. That's needed for the match
finding (compression) and match copying (decompression) to work.
As I mentioned before, the "stream" terminology that Herbert is choosing for
some reason also seems less than ideal. "workspace" would be better. Many of
the compression algorithms don't even support streaming.
> > I was going to add the original USAGI workaround but then I
> > thought perhaps it is no longer necessary as our zlib has
> > been updated since the workaround was added back in 2003.
The kernel's zlib was forked from upstream zlib in the 90s and hasn't been
re-synced since then.
- Eric
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 0/3] crypto: Add SG support to deflate
2025-03-20 17:34 ` Eric Biggers
@ 2025-03-21 2:36 ` Herbert Xu
0 siblings, 0 replies; 9+ messages in thread
From: Herbert Xu @ 2025-03-21 2:36 UTC (permalink / raw)
To: Eric Biggers
Cc: Ard Biesheuvel, Linux Crypto Mailing List, Sergey Senozhatsky,
YOSHIFUJI Hideaki/吉藤英明
On Thu, Mar 20, 2025 at 05:34:50PM +0000, Eric Biggers wrote:
>
> Then again this patchset doesn't apply, so it's unreviewable anyway.
You can test it at
git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git acomp2
> The kernel's zlib was forked from upstream zlib in the 90s and hasn't been
> re-synced since then.
It was updated in 2006, three years after the USAGI workaround:
commit 4f3865fb57a04db7cca068fed1c15badc064a302
Author: Richard Purdie <rpurdie@rpsys.net>
Date: Thu Jun 22 14:47:34 2006 -0700
[PATCH] zlib_inflate: Upgrade library code to a recent version
Cheers,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 9+ messages in thread