From: Jay Wang <wanjay@amazon.com>
To: Herbert Xu <herbert@gondor.apana.org.au>,
"David S . Miller" <davem@davemloft.net>,
<linux-crypto@vger.kernel.org>
Cc: Jay Wang <jay.wang.upstream@gmail.com>,
Vegard Nossum <vegard.nossum@oracle.com>,
Nicolai Stange <nstange@suse.de>,
Ilia Okomin <ilya.okomin@oracle.com>,
Catalin Marinas <catalin.marinas@arm.com>,
"Will Deacon" <will@kernel.org>,
Thomas Gleixner <tglx@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Borislav Petkov <bp@alien8.de>,
Luis Chamberlain <mcgrof@kernel.org>,
Petr Pavlu <petr.pavlu@suse.com>,
Nathan Chancellor <nathan@kernel.org>,
Nicolas Schier <nsc@kernel.org>,
<linux-arm-kernel@lists.infradead.org>, <x86@kernel.org>,
<linux-kbuild@vger.kernel.org>, <linux-modules@vger.kernel.org>
Subject: [PATCH 05/17] module: allow kernel module loading directly from memory
Date: Thu, 12 Feb 2026 02:42:09 +0000 [thread overview]
Message-ID: <20260212024228.6267-6-wanjay@amazon.com> (raw)
In-Reply-To: <20260212024228.6267-1-wanjay@amazon.com>
From: Vegard Nossum <vegard.nossum@oracle.com>
To enable loading the crypto module early, before the file system is ready,
add a new helper function, load_crypto_module_mem(), which can load a kernel
module from a byte array in memory. When loading a module this way, we skip
signature verification, since the crypto subsystem needed to verify
signatures is not yet available until this module itself has been loaded.
To indicate that a module is being loaded this way, a new module loader flag,
MODULE_INIT_CRYPTO_FROM_MEM, is added.
Co-developed-by: Saeed Mirzamohammadi <saeed.mirzamohammadi@oracle.com>
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
[wanjay: code changes and revised commit message]
Signed-off-by: Jay Wang <wanjay@amazon.com>
---
include/linux/module.h | 2 +
include/uapi/linux/module.h | 5 ++
kernel/module/main.c | 100 +++++++++++++++++++++++++-----------
kernel/params.c | 3 +-
4 files changed, 79 insertions(+), 31 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index 20ddfd97630d..22a1d8459ce4 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -589,6 +589,8 @@ struct module {
#ifdef CONFIG_MODULES
+extern int load_crypto_module_mem(const char *mem, size_t size);
+
/* Get/put a kernel symbol (calls must be symmetric) */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h
index 03a33ffffcba..30e9a7813eac 100644
--- a/include/uapi/linux/module.h
+++ b/include/uapi/linux/module.h
@@ -7,4 +7,9 @@
#define MODULE_INIT_IGNORE_VERMAGIC 2
#define MODULE_INIT_COMPRESSED_FILE 4
+#ifdef __KERNEL__
+/* Internal flags */
+#define MODULE_INIT_CRYPTO_FROM_MEM 30
+#endif
+
#endif /* _UAPI_LINUX_MODULE_H */
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 710ee30b3bea..2914e7619766 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -2572,11 +2572,14 @@ static void module_augment_kernel_taints(struct module *mod, struct load_info *i
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
- const char *modmagic = get_modinfo(info, "vermagic");
+ const char *modmagic = NULL;
int err;
- if (flags & MODULE_INIT_IGNORE_VERMAGIC)
- modmagic = NULL;
+ if (flags & MODULE_INIT_CRYPTO_FROM_MEM)
+ return 0;
+
+ if (!(flags & MODULE_INIT_IGNORE_VERMAGIC))
+ modmagic = get_modinfo(info, "vermagic");
/* This is allowed: modprobe --force will invalidate it. */
if (!modmagic) {
@@ -3007,7 +3010,7 @@ module_param(async_probe, bool, 0644);
* Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
* helper command 'lx-symbols'.
*/
-static noinline int do_init_module(struct module *mod)
+static noinline int do_init_module(struct module *mod, int flags)
{
int ret = 0;
struct mod_initfree *freeinit;
@@ -3070,8 +3073,10 @@ static noinline int do_init_module(struct module *mod)
ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base,
mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size);
mutex_lock(&module_mutex);
- /* Drop initial reference. */
- module_put(mod);
+ /* Drop initial reference for normal modules to allow unloading.
+ * Keep reference for MODULE_INIT_CRYPTO_FROM_MEM modules to prevent unloading. */
+ if (!(flags & MODULE_INIT_CRYPTO_FROM_MEM))
+ module_put(mod);
trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
/* Switch to core kallsyms now init is done: kallsyms may be walking! */
@@ -3347,31 +3352,17 @@ static int early_mod_check(struct load_info *info, int flags)
/*
* Allocate and load the module: note that size of section 0 is always
* zero, and we rely on this for optional sections.
+ *
+ * NOTE: module signature verification must have been done already.
*/
-static int load_module(struct load_info *info, const char __user *uargs,
- int flags)
+static int _load_module(struct load_info *info, const char __user *uargs,
+ int flags)
{
struct module *mod;
bool module_allocated = false;
long err = 0;
char *after_dashes;
- /*
- * Do the signature check (if any) first. All that
- * the signature check needs is info->len, it does
- * not need any of the section info. That can be
- * set up later. This will minimize the chances
- * of a corrupt module causing problems before
- * we even get to the signature check.
- *
- * The check will also adjust info->len by stripping
- * off the sig length at the end of the module, making
- * checks against info->len more correct.
- */
- err = module_sig_check(info, flags);
- if (err)
- goto free_copy;
-
/*
* Do basic sanity checks against the ELF header and
* sections. Cache useful sections and set the
@@ -3405,7 +3396,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
* We are tainting your kernel if your module gets into
* the modules linked list somehow.
*/
- module_augment_kernel_taints(mod, info);
+ if (!(flags & MODULE_INIT_CRYPTO_FROM_MEM))
+ module_augment_kernel_taints(mod, info);
/* To avoid stressing percpu allocator, do this once we're unique. */
err = percpu_modalloc(mod, info);
@@ -3452,7 +3444,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
flush_module_icache(mod);
/* Now copy in args */
- mod->args = strndup_user(uargs, ~0UL >> 1);
+ if ((flags & MODULE_INIT_CRYPTO_FROM_MEM))
+ mod->args = kstrdup("", GFP_KERNEL);
+ else
+ mod->args = strndup_user(uargs, ~0UL >> 1);
+
if (IS_ERR(mod->args)) {
err = PTR_ERR(mod->args);
goto free_arch_cleanup;
@@ -3500,13 +3496,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
if (codetag_load_module(mod))
goto sysfs_cleanup;
- /* Get rid of temporary copy. */
- free_copy(info, flags);
-
/* Done! */
trace_module_load(mod);
- return do_init_module(mod);
+ return do_init_module(mod, flags);
sysfs_cleanup:
mod_sysfs_teardown(mod);
@@ -3562,7 +3555,54 @@ static int load_module(struct load_info *info, const char __user *uargs,
audit_log_kern_module(info->name ? info->name : "?");
mod_stat_bump_becoming(info, flags);
}
+ return err;
+}
+
+/*
+ * Load crypto module from kernel memory without signature check.
+ */
+int __init load_crypto_module_mem(const char *mem, size_t size)
+{
+ int err;
+ struct load_info info = { };
+
+ if (!mem) {
+ pr_err("load_crypto_module_mem: mem parameter is NULL\n");
+ return -EINVAL;
+ }
+
+ info.sig_ok = true;
+ info.hdr = (Elf_Ehdr *) mem;
+ info.len = size;
+
+ err = _load_module(&info, NULL, MODULE_INIT_CRYPTO_FROM_MEM);
+ return err;
+}
+
+static int load_module(struct load_info *info, const char __user *uargs,
+ int flags)
+{
+ int err;
+
+ /*
+ * Do the signature check (if any) first. All that
+ * the signature check needs is info->len, it does
+ * not need any of the section info. That can be
+ * set up later. This will minimize the chances
+ * of a corrupt module causing problems before
+ * we even get to the signature check.
+ *
+ * The check will also adjust info->len by stripping
+ * off the sig length at the end of the module, making
+ * checks against info->len more correct.
+ */
+ err = module_sig_check(info, flags);
+ if (!err)
+ err = _load_module(info, uargs, flags);
+
+ /* Get rid of temporary copy. */
free_copy(info, flags);
+
return err;
}
diff --git a/kernel/params.c b/kernel/params.c
index 7c2242f64bf0..b0671d752ff1 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -967,7 +967,8 @@ static int __init param_sysfs_init(void)
return 0;
}
-subsys_initcall(param_sysfs_init);
+/* Use arch_initcall instead of subsys_initcall for early module loading */
+arch_initcall(param_sysfs_init);
/*
* param_sysfs_builtin_init - add sysfs version and parameter
--
2.47.3
next prev parent reply other threads:[~2026-02-12 2:43 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-12 2:42 [PATCH v1 00/17] crypto: Standalone crypto module (Series 1/4): Core implementation Jay Wang
2026-02-12 2:42 ` [PATCH 01/17] crypto: add Kconfig options for standalone crypto module Jay Wang
2026-02-12 2:42 ` [PATCH 02/17] crypto: add module entry for standalone crypto kernel module Jay Wang
2026-02-12 2:42 ` [PATCH 03/17] build: special compilation rule for building the standalone crypto module Jay Wang
2026-02-12 2:42 ` [PATCH 04/17] build: Add ELF marker for crypto-objs-m modules Jay Wang
2026-02-12 2:42 ` Jay Wang [this message]
2026-02-12 2:42 ` [PATCH 06/17] crypto: add pluggable interface for builtin crypto modules Jay Wang
2026-02-12 2:42 ` [PATCH 07/17] crypto: dedicated ELF sections for collected crypto initcalls Jay Wang
2026-02-12 2:42 ` [PATCH 08/17] crypto: fips140: add crypto module loader Jay Wang
2026-02-12 2:42 ` [PATCH 09/17] build: embed the standalone crypto module into vmlinux Jay Wang
2026-02-12 2:42 ` [PATCH 10/17] build: add CONFIG_DEBUG_INFO_BTF_MODULES support for the standalone crypto kernel module Jay Wang
2026-02-12 2:42 ` [PATCH 11/17] Allow selective crypto module loading at boot based on FIPS mode Jay Wang
2026-02-12 2:42 ` [PATCH 12/17] Execute crypto initcalls during module initialization Jay Wang
2026-02-12 2:42 ` [PATCH 13/17] crypto/algapi.c: skip crypto_check_module_sig() for the standalone crypto module Jay Wang
2026-02-12 2:42 ` [PATCH 14/17] crypto: fips140: add module integrity self-check Jay Wang
2026-02-12 2:42 ` [PATCH 15/17] x86: crypto: to convert exported crypto symbols into pluggable interface for x86 cryptos Jay Wang
2026-02-12 2:42 ` [PATCH 16/17] arm64: crypto: to convert exported crypto symbols into pluggable interface for arm64 cryptos Jay Wang
2026-02-12 2:42 ` [PATCH 17/17] Add standalone crypto kernel module technical documentation Jay Wang
2026-02-25 1:55 ` Eric Biggers
2026-02-25 14:08 ` Christoph Hellwig
2026-02-25 17:35 ` Jay Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260212024228.6267-6-wanjay@amazon.com \
--to=wanjay@amazon.com \
--cc=bp@alien8.de \
--cc=catalin.marinas@arm.com \
--cc=davem@davemloft.net \
--cc=herbert@gondor.apana.org.au \
--cc=ilya.okomin@oracle.com \
--cc=jay.wang.upstream@gmail.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-crypto@vger.kernel.org \
--cc=linux-kbuild@vger.kernel.org \
--cc=linux-modules@vger.kernel.org \
--cc=mcgrof@kernel.org \
--cc=mingo@redhat.com \
--cc=nathan@kernel.org \
--cc=nsc@kernel.org \
--cc=nstange@suse.de \
--cc=petr.pavlu@suse.com \
--cc=tglx@kernel.org \
--cc=vegard.nossum@oracle.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox