* [PATCH v3 1/3] ima: Remove ima_h_table structure
@ 2026-03-11 17:19 Roberto Sassu
2026-03-11 17:19 ` [PATCH v3 2/3] ima: Replace static htable queue with dynamically allocated array Roberto Sassu
` (2 more replies)
0 siblings, 3 replies; 12+ messages in thread
From: Roberto Sassu @ 2026-03-11 17:19 UTC (permalink / raw)
To: corbet, skhan, zohar, dmitry.kasatkin, eric.snowberg, paul,
jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, chenste, nramas, Roberto Sassu
From: Roberto Sassu <roberto.sassu@huawei.com>
With the upcoming change of dynamically allocating and replacing the hash
table, we would need to keep the counters for the number of measurement
entries and violations.
Since those counters don't belong there anyway, remove the ima_h_table
structure and move the counters and the hash table out as separate
variables.
Link: https://github.com/linux-integrity/linux/issues/1
Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
---
Changelog:
v2:
- Not present in this version
v1:
- Not present in this version
---
security/integrity/ima/ima.h | 9 +++------
security/integrity/ima/ima_api.c | 2 +-
security/integrity/ima/ima_fs.c | 19 +++++++++----------
security/integrity/ima/ima_kexec.c | 2 +-
security/integrity/ima/ima_queue.c | 17 ++++++++++-------
5 files changed, 24 insertions(+), 25 deletions(-)
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index c38a9eb945b6..1f2c81ec0fba 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -298,12 +298,9 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
*/
extern spinlock_t ima_queue_lock;
-struct ima_h_table {
- atomic_long_t len; /* number of stored measurements in the list */
- atomic_long_t violations;
- struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
-};
-extern struct ima_h_table ima_htable;
+extern atomic_long_t ima_num_entries;
+extern atomic_long_t ima_num_violations;
+extern struct hlist_head ima_htable[IMA_MEASURE_HTABLE_SIZE];
static inline unsigned int ima_hash_key(u8 *digest)
{
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 0916f24f005f..122d127e108d 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -146,7 +146,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
int result;
/* can overflow, only indicator */
- atomic_long_inc(&ima_htable.violations);
+ atomic_long_inc(&ima_num_violations);
result = ima_alloc_init_template(&event_data, &entry, NULL);
if (result < 0) {
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ca4931a95098..aaa460d70ff7 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -38,8 +38,8 @@ __setup("ima_canonical_fmt", default_canonical_fmt_setup);
static int valid_policy = 1;
-static ssize_t ima_show_htable_value(char __user *buf, size_t count,
- loff_t *ppos, atomic_long_t *val)
+static ssize_t ima_show_counter(char __user *buf, size_t count, loff_t *ppos,
+ atomic_long_t *val)
{
char tmpbuf[32]; /* greater than largest 'long' string value */
ssize_t len;
@@ -48,15 +48,14 @@ static ssize_t ima_show_htable_value(char __user *buf, size_t count,
return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
}
-static ssize_t ima_show_htable_violations(struct file *filp,
- char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t ima_show_num_violations(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
{
- return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
+ return ima_show_counter(buf, count, ppos, &ima_num_violations);
}
-static const struct file_operations ima_htable_violations_ops = {
- .read = ima_show_htable_violations,
+static const struct file_operations ima_num_violations_ops = {
+ .read = ima_show_num_violations,
.llseek = generic_file_llseek,
};
@@ -64,7 +63,7 @@ static ssize_t ima_show_measurements_count(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
- return ima_show_htable_value(buf, count, ppos, &ima_htable.len);
+ return ima_show_counter(buf, count, ppos, &ima_num_entries);
}
@@ -545,7 +544,7 @@ int __init ima_fs_init(void)
}
dentry = securityfs_create_file("violations", S_IRUSR | S_IRGRP,
- ima_dir, NULL, &ima_htable_violations_ops);
+ ima_dir, NULL, &ima_num_violations_ops);
if (IS_ERR(dentry)) {
ret = PTR_ERR(dentry);
goto out;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 36a34c54de58..5801649fbbef 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -43,7 +43,7 @@ void ima_measure_kexec_event(const char *event_name)
int n;
buf_size = ima_get_binary_runtime_size();
- len = atomic_long_read(&ima_htable.len);
+ len = atomic_long_read(&ima_num_entries);
n = scnprintf(ima_kexec_event, IMA_KEXEC_EVENT_LEN,
"kexec_segment_size=%lu;ima_binary_runtime_size=%lu;"
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 319522450854..4837fc6d9ada 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -32,11 +32,14 @@ static unsigned long binary_runtime_size;
static unsigned long binary_runtime_size = ULONG_MAX;
#endif
+/* num of stored meas. in the list */
+atomic_long_t ima_num_entries = ATOMIC_LONG_INIT(0);
+/* num of violations in the list */
+atomic_long_t ima_num_violations = ATOMIC_LONG_INIT(0);
+
/* key: inode (before secure-hashing a file) */
-struct ima_h_table ima_htable = {
- .len = ATOMIC_LONG_INIT(0),
- .violations = ATOMIC_LONG_INIT(0),
- .queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
+struct hlist_head ima_htable[IMA_MEASURE_HTABLE_SIZE] = {
+ [0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};
/* mutex protects atomicity of extending measurement list
@@ -61,7 +64,7 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
key = ima_hash_key(digest_value);
rcu_read_lock();
- hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
+ hlist_for_each_entry_rcu(qe, &ima_htable[key], hnext) {
rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
digest_value, hash_digest_size[ima_hash_algo]);
if ((rc == 0) && (qe->entry->pcr == pcr)) {
@@ -113,10 +116,10 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
INIT_LIST_HEAD(&qe->later);
list_add_tail_rcu(&qe->later, &ima_measurements);
- atomic_long_inc(&ima_htable.len);
+ atomic_long_inc(&ima_num_entries);
if (update_htable) {
key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
- hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable[key]);
}
if (binary_runtime_size != ULONG_MAX) {
--
2.43.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v3 2/3] ima: Replace static htable queue with dynamically allocated array
2026-03-11 17:19 [PATCH v3 1/3] ima: Remove ima_h_table structure Roberto Sassu
@ 2026-03-11 17:19 ` Roberto Sassu
2026-03-11 17:19 ` [PATCH v3 3/3] ima: Add support for staging measurements for deletion Roberto Sassu
2026-03-17 19:15 ` [PATCH v3 1/3] ima: Remove ima_h_table structure Mimi Zohar
2 siblings, 0 replies; 12+ messages in thread
From: Roberto Sassu @ 2026-03-11 17:19 UTC (permalink / raw)
To: corbet, skhan, zohar, dmitry.kasatkin, eric.snowberg, paul,
jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, chenste, nramas, Roberto Sassu
From: Roberto Sassu <roberto.sassu@huawei.com>
The IMA hash table is a fixed-size array of hlist_head buckets:
struct hlist_head ima_htable[IMA_MEASURE_HTABLE_SIZE];
IMA_MEASURE_HTABLE_SIZE is (1 << IMA_HASH_BITS) = 1024 buckets, each a
struct hlist_head (one pointer, 8 bytes on 64-bit). That is 8 KiB allocated
in BSS for every kernel, regardless of whether IMA is ever used, and
regardless of how many measurements are actually made.
Replace the fixed-size array with a RCU-protected pointer to a dynamically
allocated array that is initialized in ima_init_htable(), which is called
from ima_init() during early boot. ima_init_htable() calls the static
function ima_alloc_replace_htable() which, in addition to initializing the
hash table the first time, can also hot-swap the existing hash table with a
blank one.
The allocation in ima_alloc_replace_htable() uses kcalloc() so the buckets
are zero-initialised (equivalent to HLIST_HEAD_INIT { .first = NULL }).
Callers of ima_alloc_replace_htable() must call synchronize_rcu() and free
the returned hash table.
Finally, access the hash table with rcu_dereference() in
ima_lookup_digest_entry() (reader side) and with
rcu_dereference_protected() in ima_add_digest_entry() (writer side).
No functional change: bucket count, hash function, and all locking remain
identical.
Link: https://github.com/linux-integrity/linux/issues/1
Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
---
Changelog:
v2:
- Not present in this version
v1:
- Not present in this version
---
security/integrity/ima/ima.h | 3 +-
security/integrity/ima/ima_init.c | 5 +++
security/integrity/ima/ima_queue.c | 49 +++++++++++++++++++++++++++---
3 files changed, 51 insertions(+), 6 deletions(-)
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 1f2c81ec0fba..ccd037d49de7 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -285,6 +285,7 @@ bool ima_template_has_modsig(const struct ima_template_desc *ima_template);
int ima_restore_measurement_entry(struct ima_template_entry *entry);
int ima_restore_measurement_list(loff_t bufsize, void *buf);
int ima_measurements_show(struct seq_file *m, void *v);
+int __init ima_init_htable(void);
unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
void ima_init_template_list(void);
@@ -300,7 +301,7 @@ extern spinlock_t ima_queue_lock;
extern atomic_long_t ima_num_entries;
extern atomic_long_t ima_num_violations;
-extern struct hlist_head ima_htable[IMA_MEASURE_HTABLE_SIZE];
+extern struct hlist_head __rcu *ima_htable;
static inline unsigned int ima_hash_key(u8 *digest)
{
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index a2f34f2d8ad7..7e0aa09a12e6 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -140,6 +140,11 @@ int __init ima_init(void)
rc = ima_init_digests();
if (rc != 0)
return rc;
+
+ rc = ima_init_htable();
+ if (rc != 0)
+ return rc;
+
rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
if (rc != 0)
return rc;
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 4837fc6d9ada..2050b9d21e70 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -38,9 +38,7 @@ atomic_long_t ima_num_entries = ATOMIC_LONG_INIT(0);
atomic_long_t ima_num_violations = ATOMIC_LONG_INIT(0);
/* key: inode (before secure-hashing a file) */
-struct hlist_head ima_htable[IMA_MEASURE_HTABLE_SIZE] = {
- [0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
-};
+struct hlist_head __rcu *ima_htable;
/* mutex protects atomicity of extending measurement list
* and extending the TPM PCR aggregate. Since tpm_extend can take
@@ -54,17 +52,54 @@ static DEFINE_MUTEX(ima_extend_list_mutex);
*/
static bool ima_measurements_suspended;
+/* Callers must call synchronize_rcu() and free the hash table. */
+static struct hlist_head *ima_alloc_replace_htable(void)
+{
+ struct hlist_head *old_htable, *new_htable;
+
+ /* Initializing to zeros is equivalent to call HLIST_HEAD_INIT. */
+ new_htable = kcalloc(IMA_MEASURE_HTABLE_SIZE, sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!new_htable)
+ return ERR_PTR(-ENOMEM);
+
+ old_htable = rcu_replace_pointer(ima_htable, new_htable,
+ lockdep_is_held(&ima_extend_list_mutex));
+
+ return old_htable;
+}
+
+int __init ima_init_htable(void)
+{
+ struct hlist_head *old_htable;
+
+ mutex_lock(&ima_extend_list_mutex);
+ old_htable = ima_alloc_replace_htable();
+ mutex_unlock(&ima_extend_list_mutex);
+
+ /* Synchronize_rcu() and kfree() not necessary, only for robustness. */
+ synchronize_rcu();
+
+ if (IS_ERR(old_htable))
+ return PTR_ERR(old_htable);
+
+ kfree(old_htable);
+ return 0;
+}
+
/* lookup up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
int pcr)
{
struct ima_queue_entry *qe, *ret = NULL;
+ struct hlist_head *htable;
unsigned int key;
int rc;
key = ima_hash_key(digest_value);
rcu_read_lock();
- hlist_for_each_entry_rcu(qe, &ima_htable[key], hnext) {
+ htable = rcu_dereference(ima_htable);
+ hlist_for_each_entry_rcu(qe, &htable[key], hnext) {
rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
digest_value, hash_digest_size[ima_hash_algo]);
if ((rc == 0) && (qe->entry->pcr == pcr)) {
@@ -104,6 +139,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
bool update_htable)
{
struct ima_queue_entry *qe;
+ struct hlist_head *htable;
unsigned int key;
qe = kmalloc_obj(*qe);
@@ -116,10 +152,13 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
INIT_LIST_HEAD(&qe->later);
list_add_tail_rcu(&qe->later, &ima_measurements);
+ htable = rcu_dereference_protected(ima_htable,
+ lockdep_is_held(&ima_extend_list_mutex));
+
atomic_long_inc(&ima_num_entries);
if (update_htable) {
key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
- hlist_add_head_rcu(&qe->hnext, &ima_htable[key]);
+ hlist_add_head_rcu(&qe->hnext, &htable[key]);
}
if (binary_runtime_size != ULONG_MAX) {
--
2.43.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-11 17:19 [PATCH v3 1/3] ima: Remove ima_h_table structure Roberto Sassu
2026-03-11 17:19 ` [PATCH v3 2/3] ima: Replace static htable queue with dynamically allocated array Roberto Sassu
@ 2026-03-11 17:19 ` Roberto Sassu
2026-03-17 21:03 ` Mimi Zohar
2026-03-17 19:15 ` [PATCH v3 1/3] ima: Remove ima_h_table structure Mimi Zohar
2 siblings, 1 reply; 12+ messages in thread
From: Roberto Sassu @ 2026-03-11 17:19 UTC (permalink / raw)
To: corbet, skhan, zohar, dmitry.kasatkin, eric.snowberg, paul,
jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, chenste, nramas, Roberto Sassu
From: Roberto Sassu <roberto.sassu@huawei.com>
Introduce the ability to stage the IMA measurement list for deletion.
Staging means moving the current content of the measurement list to a
separate location, and allowing users to read and delete it. This causes
the measurement list to be atomically truncated before new measurements can
be added. Staging can be done only once at a time. In the event of kexec(),
staging is reverted and staged entries will be carried over to the new
kernel.
Staged measurements can be deleted entirely, or partially, with the
non-deleted ones added back to the IMA measurements list. This allows the
remote attestation agents to easily separate the measurements that were
verified (staged and deleted) from those that weren't, due to the race
between taking a TPM quote and reading the measurements list.
User space is responsible for concatenating the staged IMA measurements list
portions (excluding the measurements added back to the IMA measurements
list) following the temporal order in which the operations were done,
together with the current measurement list. Then, it can send the collected
data to the remote verifiers.
The benefit of staging and deleting is the ability to free precious kernel
memory, in exchange for having user space reconstruct the full
measurement list from the chunks. No trust needs to be given to user space,
since the integrity of the measurement list is protected by the TPM.
By default, staging the measurements list does not alter the hash table.
When staging and deleting are done, IMA is still able to detect collisions
on the staged and later deleted measurement entries, by keeping the entry
digests (only template data are freed).
However, since during the measurements list serialization only the SHA1
digest is passed, and since there are no template data to recalculate the
other digests from, the hash table is currently not populated with digests
from staged/deleted entries after kexec().
Introduce the new kernel option ima_flush_htable to decide whether or not
the digests of staged measurement entries are flushed from the hash table,
when they are deleted. Flushing the hash table is supported only when
deleting all the staged measurements, since in that case the old hash table
can be quickly swapped with a blank one (otherwise entries would have to be
removed one by one for partial deletion).
Then, introduce ascii_runtime_measurements_<algo>_staged and
binary_runtime_measurements_<algo>_staged interfaces to stage and delete
the measurements. Use 'echo A > <IMA interface>' and
'echo D > <IMA interface>' to respectively stage and delete the entire
measurements list. Use 'echo N > <IMA interface>', with N between 1 and
ULONG_MAX - 1, to delete the selected staged portion of the measurements
list.
The ima_measure_users counter (protected by the ima_measure_mutex mutex)
has been introduced to protect access to the measurements list and the
staged part. The open method of all the measurement interfaces has been
extended to allow only one writer at a time or, in alternative, multiple
readers. The write permission is used to stage and delete the measurements,
the read permission to read them. Write requires also the CAP_SYS_ADMIN
capability.
Finally, introduce the binary_lists enum and make binary_runtime_size
and ima_num_entries as arrays, to keep track of their values for the
current IMA measurements list (BINARY), current list plus staged
measurements (BINARY_STAGED) and the cumulative list since IMA
initialization (BINARY_FULL).
Use BINARY in ima_show_measurements_count(), BINARY_STAGED in
ima_add_kexec_buffer() and BINARY_FULL in ima_measure_kexec_event().
It should be noted that the BINARY_FULL counter is not passed through
kexec. Thus, the number of entries included in the kexec critical data
records refers to the entries since the previous kexec records.
Note: This code derives from the Alt-IMA Huawei project, whose license is
GPL-2.0 OR MIT.
Link: https://github.com/linux-integrity/linux/issues/1
Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
---
Changelog
v2:
- Forbid partial deletion when flushing hash table (suggested by Mimi)
- Ignore ima_flush_htable if CONFIG_IMA_DISABLE_HTABLE is enabled
- BINARY_SIZE_* renamed to BINARY_* for better clarity
- Removed ima_measurements_staged_exist and testing list empty instead
- ima_queue_stage_trim() and ima_queue_delete_staged_trimmed() renamed to
ima_queue_stage() and ima_queue_delete_staged()
- New delete interval [1, ULONG_MAX - 1]
- Rename ima_measure_lock to ima_measure_mutex
- Move seq_open() and seq_release() outside the ima_measure_mutex lock
- Drop ima_measurements_staged_read() and use seq_read() instead
- Optimize create_securityfs_measurement_lists() changes
- New file name format with _staged suffix at the end of the file name
- Use _rcu list variant in ima_dump_measurement_list()
- Remove support for direct trimming and splice the remaining entries to
the active list (suggested by Mimi)
- Hot swap the hash table if flushing is requested
v1:
- Support for direct trimming without staging
- Support unstaging on kexec (requested by Gregory Lumen)
---
.../admin-guide/kernel-parameters.txt | 4 +
security/integrity/ima/ima.h | 17 +-
security/integrity/ima/ima_fs.c | 266 ++++++++++++++++--
security/integrity/ima/ima_kexec.c | 43 ++-
security/integrity/ima/ima_queue.c | 205 +++++++++++++-
5 files changed, 484 insertions(+), 51 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index cb850e5290c2..7a377812aa0a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2345,6 +2345,10 @@ Kernel parameters
Use the canonical format for the binary runtime
measurements, instead of host native format.
+ ima_flush_htable [IMA]
+ Flush the IMA hash table when deleting all the
+ staged measurement entries.
+
ima_hash= [IMA]
Format: { md5 | sha1 | rmd160 | sha256 | sha384
| sha512 | ... }
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index ccd037d49de7..e8aaf1e62139 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -28,6 +28,15 @@ enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
+/*
+ * BINARY: current binary measurements list
+ * BINARY_STAGED: current binary measurements list + staged entries
+ * BINARY_FULL: binary measurements list since IMA init (lost after kexec)
+ */
+enum binary_lists {
+ BINARY, BINARY_STAGED, BINARY_FULL, BINARY__LAST
+};
+
/* digest size for IMA, fits SHA1 or MD5 */
#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
@@ -118,6 +127,7 @@ struct ima_queue_entry {
struct ima_template_entry *entry;
};
extern struct list_head ima_measurements; /* list of all measurements */
+extern struct list_head ima_measurements_staged; /* list of staged meas. */
/* Some details preceding the binary serialized measurement list */
struct ima_kexec_hdr {
@@ -282,11 +292,13 @@ struct ima_template_desc *ima_template_desc_current(void);
struct ima_template_desc *ima_template_desc_buf(void);
struct ima_template_desc *lookup_template_desc(const char *name);
bool ima_template_has_modsig(const struct ima_template_desc *ima_template);
+int ima_queue_stage(void);
+int ima_queue_delete_staged(unsigned long req_value);
int ima_restore_measurement_entry(struct ima_template_entry *entry);
int ima_restore_measurement_list(loff_t bufsize, void *buf);
int ima_measurements_show(struct seq_file *m, void *v);
int __init ima_init_htable(void);
-unsigned long ima_get_binary_runtime_size(void);
+unsigned long ima_get_binary_runtime_size(enum binary_lists binary_list);
int ima_init_template(void);
void ima_init_template_list(void);
int __init ima_init_digests(void);
@@ -299,9 +311,10 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
*/
extern spinlock_t ima_queue_lock;
-extern atomic_long_t ima_num_entries;
+extern atomic_long_t ima_num_entries[BINARY__LAST];
extern atomic_long_t ima_num_violations;
extern struct hlist_head __rcu *ima_htable;
+extern struct mutex ima_extend_list_mutex;
static inline unsigned int ima_hash_key(u8 *digest)
{
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index aaa460d70ff7..cf85b0892275 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -24,7 +24,17 @@
#include "ima.h"
+/*
+ * Requests:
+ * 'A\n': stage the entire measurements list
+ * 'D\n': delete all staged measurements
+ * '[1, ULONG_MAX - 1]\n' delete N measurements entries and unstage the rest
+ */
+#define STAGED_REQ_LENGTH 21
+
static DEFINE_MUTEX(ima_write_mutex);
+static DEFINE_MUTEX(ima_measure_mutex);
+static long ima_measure_users;
bool ima_canonical_fmt;
static int __init default_canonical_fmt_setup(char *str)
@@ -63,7 +73,7 @@ static ssize_t ima_show_measurements_count(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
- return ima_show_counter(buf, count, ppos, &ima_num_entries);
+ return ima_show_counter(buf, count, ppos, &ima_num_entries[BINARY]);
}
@@ -73,14 +83,15 @@ static const struct file_operations ima_measurements_count_ops = {
};
/* returns pointer to hlist_node */
-static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
+static void *_ima_measurements_start(struct seq_file *m, loff_t *pos,
+ struct list_head *head)
{
loff_t l = *pos;
struct ima_queue_entry *qe;
/* we need a lock since pos could point beyond last element */
rcu_read_lock();
- list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ list_for_each_entry_rcu(qe, head, later) {
if (!l--) {
rcu_read_unlock();
return qe;
@@ -90,7 +101,18 @@ static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
return NULL;
}
-static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
+static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ return _ima_measurements_start(m, pos, &ima_measurements);
+}
+
+static void *ima_measurements_staged_start(struct seq_file *m, loff_t *pos)
+{
+ return _ima_measurements_start(m, pos, &ima_measurements_staged);
+}
+
+static void *_ima_measurements_next(struct seq_file *m, void *v, loff_t *pos,
+ struct list_head *head)
{
struct ima_queue_entry *qe = v;
@@ -102,7 +124,18 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
rcu_read_unlock();
(*pos)++;
- return (&qe->later == &ima_measurements) ? NULL : qe;
+ return (&qe->later == head) ? NULL : qe;
+}
+
+static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return _ima_measurements_next(m, v, pos, &ima_measurements);
+}
+
+static void *ima_measurements_staged_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ return _ima_measurements_next(m, v, pos, &ima_measurements_staged);
}
static void ima_measurements_stop(struct seq_file *m, void *v)
@@ -198,16 +231,145 @@ static const struct seq_operations ima_measurments_seqops = {
.show = ima_measurements_show
};
+static int ima_measure_lock(bool write)
+{
+ mutex_lock(&ima_measure_mutex);
+ if ((write && ima_measure_users != 0) ||
+ (!write && ima_measure_users < 0)) {
+ mutex_unlock(&ima_measure_mutex);
+ return -EBUSY;
+ }
+
+ if (write)
+ ima_measure_users--;
+ else
+ ima_measure_users++;
+ mutex_unlock(&ima_measure_mutex);
+ return 0;
+}
+
+static void ima_measure_unlock(bool write)
+{
+ mutex_lock(&ima_measure_mutex);
+ if (write)
+ ima_measure_users++;
+ else
+ ima_measure_users--;
+ mutex_unlock(&ima_measure_mutex);
+}
+
+static int _ima_measurements_open(struct inode *inode, struct file *file,
+ const struct seq_operations *seq_ops)
+{
+ bool write = (file->f_mode & FMODE_WRITE);
+ int ret;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = ima_measure_lock(write);
+ if (ret < 0)
+ return ret;
+
+ ret = seq_open(file, seq_ops);
+ if (ret < 0)
+ ima_measure_unlock(write);
+
+ return ret;
+}
+
static int ima_measurements_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ima_measurments_seqops);
+ return _ima_measurements_open(inode, file, &ima_measurments_seqops);
+}
+
+static int ima_measurements_release(struct inode *inode, struct file *file)
+{
+ bool write = (file->f_mode & FMODE_WRITE);
+ int ret;
+
+ ret = seq_release(inode, file);
+
+ ima_measure_unlock(write);
+
+ return ret;
}
static const struct file_operations ima_measurements_ops = {
.open = ima_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = ima_measurements_release,
+};
+
+static const struct seq_operations ima_measurments_staged_seqops = {
+ .start = ima_measurements_staged_start,
+ .next = ima_measurements_staged_next,
+ .stop = ima_measurements_stop,
+ .show = ima_measurements_show
+};
+
+static int ima_measurements_staged_open(struct inode *inode, struct file *file)
+{
+ return _ima_measurements_open(inode, file,
+ &ima_measurments_staged_seqops);
+}
+
+static ssize_t ima_measurements_staged_write(struct file *file,
+ const char __user *buf,
+ size_t datalen, loff_t *ppos)
+{
+ char req[STAGED_REQ_LENGTH];
+ unsigned long req_value;
+ int ret;
+
+ if (*ppos > 0 || datalen < 2 || datalen > STAGED_REQ_LENGTH)
+ return -EINVAL;
+
+ if (copy_from_user(req, buf, datalen) != 0)
+ return -EFAULT;
+
+ if (req[datalen - 1] != '\n')
+ return -EINVAL;
+
+ req[datalen - 1] = '\0';
+
+ switch (req[0]) {
+ case 'A':
+ if (datalen != 2)
+ return -EINVAL;
+
+ ret = ima_queue_stage();
+ break;
+ case 'D':
+ if (datalen != 2)
+ return -EINVAL;
+
+ ret = ima_queue_delete_staged(ULONG_MAX);
+ break;
+ default:
+ ret = kstrtoul(req, 10, &req_value);
+ if (ret < 0)
+ return ret;
+
+ if (req_value == ULONG_MAX)
+ return -ERANGE;
+
+ ret = ima_queue_delete_staged(req_value);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return datalen;
+}
+
+static const struct file_operations ima_measurements_staged_ops = {
+ .open = ima_measurements_staged_open,
+ .read = seq_read,
+ .write = ima_measurements_staged_write,
+ .llseek = seq_lseek,
+ .release = ima_measurements_release,
};
void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
@@ -272,14 +434,37 @@ static const struct seq_operations ima_ascii_measurements_seqops = {
static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ima_ascii_measurements_seqops);
+ return _ima_measurements_open(inode, file,
+ &ima_ascii_measurements_seqops);
}
static const struct file_operations ima_ascii_measurements_ops = {
.open = ima_ascii_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = ima_measurements_release,
+};
+
+static const struct seq_operations ima_ascii_measurements_staged_seqops = {
+ .start = ima_measurements_staged_start,
+ .next = ima_measurements_staged_next,
+ .stop = ima_measurements_stop,
+ .show = ima_ascii_measurements_show
+};
+
+static int ima_ascii_measurements_staged_open(struct inode *inode,
+ struct file *file)
+{
+ return _ima_measurements_open(inode, file,
+ &ima_ascii_measurements_staged_seqops);
+}
+
+static const struct file_operations ima_ascii_measurements_staged_ops = {
+ .open = ima_ascii_measurements_staged_open,
+ .read = seq_read,
+ .write = ima_measurements_staged_write,
+ .llseek = seq_lseek,
+ .release = ima_measurements_release,
};
static ssize_t ima_read_policy(char *path)
@@ -385,10 +570,21 @@ static const struct seq_operations ima_policy_seqops = {
};
#endif
-static int __init create_securityfs_measurement_lists(void)
+static int __init create_securityfs_measurement_lists(bool staging)
{
+ const struct file_operations *ascii_ops = &ima_ascii_measurements_ops;
+ const struct file_operations *binary_ops = &ima_measurements_ops;
+ mode_t permissions = S_IRUSR | S_IRGRP;
+ const char *file_suffix = "";
int count = NR_BANKS(ima_tpm_chip);
+ if (staging) {
+ ascii_ops = &ima_ascii_measurements_staged_ops;
+ binary_ops = &ima_measurements_staged_ops;
+ file_suffix = "_staged";
+ permissions |= S_IWUSR | S_IWGRP;
+ }
+
if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip))
count++;
@@ -398,26 +594,33 @@ static int __init create_securityfs_measurement_lists(void)
struct dentry *dentry;
if (algo == HASH_ALGO__LAST)
- sprintf(file_name, "ascii_runtime_measurements_tpm_alg_%x",
- ima_tpm_chip->allocated_banks[i].alg_id);
+ snprintf(file_name, sizeof(file_name),
+ "ascii_runtime_measurements_tpm_alg_%x%s",
+ ima_tpm_chip->allocated_banks[i].alg_id,
+ file_suffix);
else
- sprintf(file_name, "ascii_runtime_measurements_%s",
- hash_algo_name[algo]);
- dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
+ snprintf(file_name, sizeof(file_name),
+ "ascii_runtime_measurements_%s%s",
+ hash_algo_name[algo], file_suffix);
+ dentry = securityfs_create_file(file_name, permissions,
ima_dir, (void *)(uintptr_t)i,
- &ima_ascii_measurements_ops);
+ ascii_ops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (algo == HASH_ALGO__LAST)
- sprintf(file_name, "binary_runtime_measurements_tpm_alg_%x",
- ima_tpm_chip->allocated_banks[i].alg_id);
+ snprintf(file_name, sizeof(file_name),
+ "binary_runtime_measurements_tpm_alg_%x%s",
+ ima_tpm_chip->allocated_banks[i].alg_id,
+ file_suffix);
else
- sprintf(file_name, "binary_runtime_measurements_%s",
- hash_algo_name[algo]);
- dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
+ snprintf(file_name, sizeof(file_name),
+ "binary_runtime_measurements_%s%s",
+ hash_algo_name[algo], file_suffix);
+
+ dentry = securityfs_create_file(file_name, permissions,
ima_dir, (void *)(uintptr_t)i,
- &ima_measurements_ops);
+ binary_ops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
}
@@ -517,7 +720,10 @@ int __init ima_fs_init(void)
goto out;
}
- ret = create_securityfs_measurement_lists();
+ ret = create_securityfs_measurement_lists(false);
+ if (ret == 0)
+ ret = create_securityfs_measurement_lists(true);
+
if (ret != 0)
goto out;
@@ -535,6 +741,20 @@ int __init ima_fs_init(void)
goto out;
}
+ dentry = securityfs_create_symlink("binary_runtime_measurements_staged",
+ ima_dir, "binary_runtime_measurements_sha1_staged", NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = securityfs_create_symlink("ascii_runtime_measurements_staged",
+ ima_dir, "ascii_runtime_measurements_sha1_staged", NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
dentry = securityfs_create_file("runtime_measurements_count",
S_IRUSR | S_IRGRP, ima_dir, NULL,
&ima_measurements_count_ops);
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 5801649fbbef..70ee3a039df2 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -42,8 +42,8 @@ void ima_measure_kexec_event(const char *event_name)
long len;
int n;
- buf_size = ima_get_binary_runtime_size();
- len = atomic_long_read(&ima_num_entries);
+ buf_size = ima_get_binary_runtime_size(BINARY_FULL);
+ len = atomic_long_read(&ima_num_entries[BINARY_FULL]);
n = scnprintf(ima_kexec_event, IMA_KEXEC_EVENT_LEN,
"kexec_segment_size=%lu;ima_binary_runtime_size=%lu;"
@@ -80,6 +80,17 @@ static int ima_alloc_kexec_file_buf(size_t segment_size)
return 0;
}
+static int ima_dump_measurement(struct ima_kexec_hdr *khdr,
+ struct ima_queue_entry *qe)
+{
+ if (ima_kexec_file.count >= ima_kexec_file.size)
+ return -EINVAL;
+
+ khdr->count++;
+ ima_measurements_show(&ima_kexec_file, qe);
+ return 0;
+}
+
static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
unsigned long segment_size)
{
@@ -95,17 +106,26 @@ static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
memset(&khdr, 0, sizeof(khdr));
khdr.version = 1;
- /* This is an append-only list, no need to hold the RCU read lock */
- list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
- if (ima_kexec_file.count < ima_kexec_file.size) {
- khdr.count++;
- ima_measurements_show(&ima_kexec_file, qe);
- } else {
- ret = -EINVAL;
+ /* It can race with ima_queue_stage() and ima_queue_delete_staged(). */
+ mutex_lock(&ima_extend_list_mutex);
+
+ list_for_each_entry_rcu(qe, &ima_measurements_staged, later,
+ lockdep_is_held(&ima_extend_list_mutex)) {
+ ret = ima_dump_measurement(&khdr, qe);
+ if (ret < 0)
break;
- }
}
+ list_for_each_entry_rcu(qe, &ima_measurements, later,
+ lockdep_is_held(&ima_extend_list_mutex)) {
+ if (!ret)
+ ret = ima_dump_measurement(&khdr, qe);
+ if (ret < 0)
+ break;
+ }
+
+ mutex_unlock(&ima_extend_list_mutex);
+
/*
* fill in reserved space with some buffer details
* (eg. version, buffer size, number of measurements)
@@ -159,7 +179,8 @@ void ima_add_kexec_buffer(struct kimage *image)
else
extra_memory = CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB * 1024;
- binary_runtime_size = ima_get_binary_runtime_size() + extra_memory;
+ binary_runtime_size = ima_get_binary_runtime_size(BINARY_STAGED) +
+ extra_memory;
if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
kexec_segment_size = ULONG_MAX;
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 2050b9d21e70..08cd60fa959e 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -22,29 +22,48 @@
#define AUDIT_CAUSE_LEN_MAX 32
+bool ima_flush_htable;
+static int __init ima_flush_htable_setup(char *str)
+{
+ if (IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
+ pr_warn("Hash table not enabled, ignoring request to flush\n");
+ return 1;
+ }
+
+ ima_flush_htable = true;
+ return 1;
+}
+__setup("ima_flush_htable", ima_flush_htable_setup);
+
/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;
LIST_HEAD(ima_measurements); /* list of all measurements */
+LIST_HEAD(ima_measurements_staged); /* list of staged measurements */
#ifdef CONFIG_IMA_KEXEC
-static unsigned long binary_runtime_size;
+static unsigned long binary_runtime_size[BINARY__LAST];
#else
-static unsigned long binary_runtime_size = ULONG_MAX;
+static unsigned long binary_runtime_size[BINARY__LAST] = {
+ [0 ... BINARY__LAST - 1] = ULONG_MAX
+};
#endif
/* num of stored meas. in the list */
-atomic_long_t ima_num_entries = ATOMIC_LONG_INIT(0);
+atomic_long_t ima_num_entries[BINARY__LAST] = {
+ [0 ... BINARY__LAST - 1] = ATOMIC_LONG_INIT(0)
+};
+
/* num of violations in the list */
atomic_long_t ima_num_violations = ATOMIC_LONG_INIT(0);
/* key: inode (before secure-hashing a file) */
struct hlist_head __rcu *ima_htable;
-/* mutex protects atomicity of extending measurement list
+/* mutex protects atomicity of extending and staging measurement list
* and extending the TPM PCR aggregate. Since tpm_extend can take
* long (and the tpm driver uses a mutex), we can't use the spinlock.
*/
-static DEFINE_MUTEX(ima_extend_list_mutex);
+DEFINE_MUTEX(ima_extend_list_mutex);
/*
* Used internally by the kernel to suspend measurements.
@@ -140,7 +159,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
{
struct ima_queue_entry *qe;
struct hlist_head *htable;
- unsigned int key;
+ unsigned int key, i;
qe = kmalloc_obj(*qe);
if (qe == NULL) {
@@ -155,19 +174,25 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
htable = rcu_dereference_protected(ima_htable,
lockdep_is_held(&ima_extend_list_mutex));
- atomic_long_inc(&ima_num_entries);
+ for (i = 0; i < BINARY__LAST; i++)
+ atomic_long_inc(&ima_num_entries[i]);
+
if (update_htable) {
key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
hlist_add_head_rcu(&qe->hnext, &htable[key]);
}
- if (binary_runtime_size != ULONG_MAX) {
- int size;
+ for (i = 0; i < BINARY__LAST; i++) {
+ if (binary_runtime_size[i] != ULONG_MAX) {
+ int size;
- size = get_binary_runtime_size(entry);
- binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
- binary_runtime_size + size : ULONG_MAX;
+ size = get_binary_runtime_size(entry);
+ binary_runtime_size[i] =
+ (binary_runtime_size[i] < ULONG_MAX - size) ?
+ binary_runtime_size[i] + size : ULONG_MAX;
+ }
}
+
return 0;
}
@@ -176,12 +201,18 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
* entire binary_runtime_measurement list, including the ima_kexec_hdr
* structure.
*/
-unsigned long ima_get_binary_runtime_size(void)
+unsigned long ima_get_binary_runtime_size(enum binary_lists binary_list)
{
- if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
+ unsigned long val;
+
+ mutex_lock(&ima_extend_list_mutex);
+ val = binary_runtime_size[binary_list];
+ mutex_unlock(&ima_extend_list_mutex);
+
+ if (val >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
return ULONG_MAX;
else
- return binary_runtime_size + sizeof(struct ima_kexec_hdr);
+ return val + sizeof(struct ima_kexec_hdr);
}
static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
@@ -262,6 +293,150 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
return result;
}
+int ima_queue_stage(void)
+{
+ int ret = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ if (!list_empty(&ima_measurements_staged)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ if (list_empty(&ima_measurements)) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ list_replace(&ima_measurements, &ima_measurements_staged);
+ INIT_LIST_HEAD(&ima_measurements);
+ atomic_long_set(&ima_num_entries[BINARY], 0);
+ if (IS_ENABLED(CONFIG_IMA_KEXEC))
+ binary_runtime_size[BINARY] = 0;
+out_unlock:
+ mutex_unlock(&ima_extend_list_mutex);
+ return ret;
+}
+
+int ima_queue_delete_staged(unsigned long req_value)
+{
+ unsigned long req_value_copy = req_value;
+ unsigned long size_to_remove = 0, num_to_remove = 0;
+ struct ima_queue_entry *qe, *qe_tmp;
+ struct list_head *cut_pos = NULL;
+ LIST_HEAD(ima_measurements_trim);
+ struct hlist_head *old_queue = NULL;
+ unsigned int i;
+
+ if (req_value == 0) {
+ pr_err("Must delete at least one entry\n");
+ return -EINVAL;
+ }
+
+ if (req_value < ULONG_MAX && ima_flush_htable) {
+ pr_err("Deleting staged N measurements not supported when flushing the hash table is requested\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Safe walk (no concurrent write), not under ima_extend_list_mutex
+ * for performance reasons.
+ */
+ list_for_each_entry(qe, &ima_measurements_staged, later) {
+ size_to_remove += get_binary_runtime_size(qe->entry);
+ num_to_remove++;
+
+ if (req_value < ULONG_MAX && --req_value_copy == 0) {
+ /* qe->later always points to a valid list entry. */
+ cut_pos = &qe->later;
+ break;
+ }
+ }
+
+ if (req_value < ULONG_MAX && req_value_copy > 0)
+ return -ENOENT;
+
+ mutex_lock(&ima_extend_list_mutex);
+ if (list_empty(&ima_measurements_staged)) {
+ mutex_unlock(&ima_extend_list_mutex);
+ return -ENOENT;
+ }
+
+ if (req_value < ULONG_MAX) {
+ /*
+ * ima_dump_measurement_list() does not modify the list,
+ * cut_pos remains the same even if it was computed before
+ * the lock.
+ */
+ __list_cut_position(&ima_measurements_trim,
+ &ima_measurements_staged, cut_pos);
+ } else {
+ list_replace(&ima_measurements_staged, &ima_measurements_trim);
+ INIT_LIST_HEAD(&ima_measurements_staged);
+ }
+
+ atomic_long_sub(num_to_remove, &ima_num_entries[BINARY_STAGED]);
+ atomic_long_add(atomic_long_read(&ima_num_entries[BINARY_STAGED]),
+ &ima_num_entries[BINARY]);
+ atomic_long_set(&ima_num_entries[BINARY_STAGED],
+ atomic_long_read(&ima_num_entries[BINARY]));
+
+ if (IS_ENABLED(CONFIG_IMA_KEXEC)) {
+ binary_runtime_size[BINARY_STAGED] -= size_to_remove;
+ binary_runtime_size[BINARY] +=
+ binary_runtime_size[BINARY_STAGED];
+ binary_runtime_size[BINARY_STAGED] =
+ binary_runtime_size[BINARY];
+ }
+
+ if (ima_flush_htable) {
+ old_queue = ima_alloc_replace_htable();
+ if (IS_ERR(old_queue)) {
+ mutex_unlock(&ima_extend_list_mutex);
+ return PTR_ERR(old_queue);
+ }
+ }
+
+ /*
+ * Splice (prepend) any remaining non-deleted staged entries to the
+ * active list (RCU not needed, there cannot be concurrent readers).
+ */
+ list_splice(&ima_measurements_staged, &ima_measurements);
+ INIT_LIST_HEAD(&ima_measurements_staged);
+ mutex_unlock(&ima_extend_list_mutex);
+
+ if (ima_flush_htable) {
+ synchronize_rcu();
+ kfree(old_queue);
+ }
+
+ list_for_each_entry_safe(qe, qe_tmp, &ima_measurements_trim, later) {
+ /*
+ * Safe to free template_data here without synchronize_rcu()
+ * because the only htable reader, ima_lookup_digest_entry(),
+ * accesses only entry->digests, not template_data. If new
+ * htable readers are added that access template_data, a
+ * synchronize_rcu() is required here.
+ */
+ for (i = 0; i < qe->entry->template_desc->num_fields; i++) {
+ kfree(qe->entry->template_data[i].data);
+ qe->entry->template_data[i].data = NULL;
+ qe->entry->template_data[i].len = 0;
+ }
+
+ list_del(&qe->later);
+
+ /* No leak if !ima_flush_htable, referenced by ima_htable. */
+ if (ima_flush_htable) {
+ kfree(qe->entry->digests);
+ kfree(qe->entry);
+ kfree(qe);
+ }
+ }
+
+ return 0;
+}
+
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
int result = 0;
--
2.43.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v3 1/3] ima: Remove ima_h_table structure
2026-03-11 17:19 [PATCH v3 1/3] ima: Remove ima_h_table structure Roberto Sassu
2026-03-11 17:19 ` [PATCH v3 2/3] ima: Replace static htable queue with dynamically allocated array Roberto Sassu
2026-03-11 17:19 ` [PATCH v3 3/3] ima: Add support for staging measurements for deletion Roberto Sassu
@ 2026-03-17 19:15 ` Mimi Zohar
2 siblings, 0 replies; 12+ messages in thread
From: Mimi Zohar @ 2026-03-17 19:15 UTC (permalink / raw)
To: Roberto Sassu, corbet, skhan, dmitry.kasatkin, eric.snowberg,
paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, chenste, nramas, Roberto Sassu
On Wed, 2026-03-11 at 18:19 +0100, Roberto Sassu wrote:
> From: Roberto Sassu <roberto.sassu@huawei.com>
>
> With the upcoming change of dynamically allocating and replacing the hash
> table, we would need to keep the counters for number of measurements
> entries and violations.
>
> Since those counters don't belong there anyway, remove the ima_h_table
> structure instead and move the counters and the hash table as separate
> variables.
There's no cover letter or motivation in this patch description for needing to
"dynamically allocating or replacing the existing hash table."
Saying that the htable, number of records in the measurement list, and violation
counter don't belong grouped together is insufficient. There must have been a
valid reason for why they were grouped together originally (e.g. never removed
or reset).
Please provide a motivation for removing the ima_h_table struct and its usage
and defining them independently of each other.
thanks,
Mimi
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-11 17:19 ` [PATCH v3 3/3] ima: Add support for staging measurements for deletion Roberto Sassu
@ 2026-03-17 21:03 ` Mimi Zohar
2026-03-19 21:31 ` steven chen
0 siblings, 1 reply; 12+ messages in thread
From: Mimi Zohar @ 2026-03-17 21:03 UTC (permalink / raw)
To: Roberto Sassu, corbet, skhan, dmitry.kasatkin, eric.snowberg,
paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, chenste, nramas, Roberto Sassu
Hi Roberto,
On Wed, 2026-03-11 at 18:19 +0100, Roberto Sassu wrote:
> From: Roberto Sassu <roberto.sassu@huawei.com>
>
> Introduce the ability of staging the IMA measurement list for deletion.
> Staging means moving the current content of the measurement list to a
> separate location, and allowing users to read and delete it. This causes
> the measurement list to be atomically truncated before new measurements can
> be added.
I really like this design of atomically moving and subsequently deleting the
measurement list. However this is a solution, not the motivation for the patch.
Please include the motivation for the patch, before describing the solution.
> Staging can be done only once at a time. In the event of kexec(),
> staging is reverted and staged entries will be carried over to the new
> kernel.
>
> Staged measurements can be deleted entirely, or partially, with the
> non-deleted ones added back to the IMA measurements list.
This patch description is really long, which is an indication that the patch
needs to be split up. Adding support for partially deleting the measurement
list records, by prepending the remaining measurement records, should be a
separate patch.
> This allows the
> remote attestation agents to easily separate the measurements that were
> verified (staged and deleted) from those that weren't due to the race
> between taking a TPM quote and reading the measurements list.
>
> User space is responsible to concatenate the staged IMA measurements list
> portions (excluding the measurements added back to the IMA measurements
> list) following the temporal order in which the operations were done,
> together with the current measurement list. Then, it can send the collected
> data to the remote verifiers.
This belongs in a Documentation patch.
>
> The benefit of staging and deleting is the ability to free precious kernel
> memory,
This is the motivation for the patch.
> in exchange for letting user space reconstruct the full
> measurement list from the chunks. No trust needs to be given to user space,
> since the integrity of the measurement list is protected by the TPM.
Agreed the measurement list, itself, is protected by the TPM. However, relying
on userspace to reassemble the chunks is another concern. Support for staging
and deleting the measurement list should be configurable. Defining a Kconfig
should be part of this initial patch.
> By default, staging the measurements list does not alter the hash table.
> When staging and deleting are done, IMA is still able to detect collisions
> on the staged and later deleted measurement entries, by keeping the entry
> digests (only template data are freed).
>
> However, since during the measurements list serialization only the SHA1
> digest is passed, and since there are no template data to recalculate the
> other digests from, the hash table is currently not populated with digests
> from staged/deleted entries after kexec().
>
> Introduce the new kernel option ima_flush_htable to decide whether or not
> the digests of staged measurement entries are flushed from the hash table,
> when they are deleted. Flushing the hash table is supported only when
> deleting all the staged measurements, since in that case the old hash table
> can be quickly swapped with a blank one (otherwise entries would have to be
> removed one by one for partial deletion).
Allowing the hash table to be deleted would be an example of another patch.
>
> Then, introduce ascii_runtime_measurements_<algo>_staged and
> binary_runtime_measurements_<algo>_staged interfaces to stage and delete
> the measurements. Use 'echo A > <IMA interface>' and
> 'echo D > <IMA interface>' to respectively stage and delete the entire
> measurements list. Use 'echo N > <IMA interface>', with N between 1 and
> ULONG_MAX - 1, to delete the selected staged portion of the measurements
> list.
>
> The ima_measure_users counter (protected by the ima_measure_mutex mutex)
> has been introduced to protect access to the measurements list and the
> staged part. The open method of all the measurement interfaces has been
> extended to allow only one writer at a time or, in alternative, multiple
> readers. The write permission is used to stage and delete the measurements,
> the read permission to read them. Write requires also the CAP_SYS_ADMIN
> capability.
Yes, this is part of the initial patch that adds support for staging the
measurement list.
>
> Finally, introduce the binary_lists enum and make binary_runtime_size
> and ima_num_entries as arrays, to keep track of their values for the
> current IMA measurements list (BINARY), current list plus staged
> measurements (BINARY_STAGED) and the cumulative list since IMA
> initialization (BINARY_FULL).
>
> Use BINARY in ima_show_measurements_count(), BINARY_STAGED in
> ima_add_kexec_buffer() and BINARY_FULL in ima_measure_kexec_event().
>
> It should be noted that the BINARY_FULL counter is not passed through
> kexec. Thus, the number of entries included in the kexec critical data
> records refers to the entries since the previous kexec records.
>
> Note: This code derives from the Alt-IMA Huawei project, whose license is
> GPL-2.0 OR MIT.
>
> Link: https://github.com/linux-integrity/linux/issues/1
> Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
The design looks good. As I mentioned above, this patch description is quite
long, which is an indication that the patch needs to be split up. One method of
breaking it up would be:
- (Basic) support for staging measurements for deletion (based on a Kconfig)
- Support for removing the hash table
- Support for deleting N measurement records (and pre-pending the remaining
measurement records)
- Adding documentation
thanks,
Mimi
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-17 21:03 ` Mimi Zohar
@ 2026-03-19 21:31 ` steven chen
2026-03-20 12:41 ` Mimi Zohar
0 siblings, 1 reply; 12+ messages in thread
From: steven chen @ 2026-03-19 21:31 UTC (permalink / raw)
To: Mimi Zohar, Roberto Sassu, corbet, skhan, dmitry.kasatkin,
eric.snowberg, paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, nramas, Roberto Sassu, steven chen
On 3/17/2026 2:03 PM, Mimi Zohar wrote:
> Hi Roberto,
>
> On Wed, 2026-03-11 at 18:19 +0100, Roberto Sassu wrote:
>> From: Roberto Sassu <roberto.sassu@huawei.com>
>>
>> Introduce the ability of staging the IMA measurement list for deletion.
>> Staging means moving the current content of the measurement list to a
>> separate location, and allowing users to read and delete it. This causes
>> the measurement list to be atomically truncated before new measurements can
>> be added.
> I really like this design of atomically moving and subsequently deleting the
> measurement list. However this is a solution, not the motivation for the patch.
> Please include the motivation for the patch, before describing the solution.
>
>> Staging can be done only once at a time. In the event of kexec(),
>> staging is reverted and staged entries will be carried over to the new
>> kernel.
>> Staged measurements can be deleted entirely, or partially, with the
>> non-deleted ones added back to the IMA measurements list.
> This patch description is really long, which is an indication that the patch
> needs to be split up. Adding support for partially deleting the measurement
> list records, by prepending the remaining measurement records, should be a
> separate patch.
>
>> This allows the
>> remote attestation agents to easily separate the measurements that where
>> verified (staged and deleted) from those that weren't due to the race
>> between taking a TPM quote and reading the measurements list.
>>
>> User space is responsible to concatenate the staged IMA measurements list
>> portions (excluding the measurements added back to the IMA measurements
>> list) following the temporal order in which the operations were done,
>> together with the current measurement list. Then, it can send the collected
>> data to the remote verifiers.
> This belongs in a Documentation patch.
>
>> The benefit of staging and deleting is the ability to free precious kernel
>> memory,
> This is the motivation for the patch.
>
>> in exchange for letting user space reconstruct the full
>> measurement list from the chunks. No trust needs to be given to user space,
>> since the integrity of the measurement list is protected by the TPM.
> Agreed the measurement list, itself, is protected by the TPM. However, relying
> on userspace to reassemble the chunks is another concern. Support for staging
> and deleting the measurement list should be configurable. Defining a Kconfig
> should be part of this initial patch.
>
>> By default, staging the measurements list does not alter the hash table.
>> When staging and deleting are done, IMA is still able to detect collisions
>> on the staged and later deleted measurement entries, by keeping the entry
>> digests (only template data are freed).
>>
>> However, since during the measurements list serialization only the SHA1
>> digest is passed, and since there are no template data to recalculate the
>> other digests from, the hash table is currently not populated with digests
>> from staged/deleted entries after kexec().
>>
>> Introduce the new kernel option ima_flush_htable to decide whether or not
>> the digests of staged measurement entries are flushed from the hash table,
>> when they are deleted. Flushing the hash table is supported only when
>> deleting all the staged measurements, since in that case the old hash table
>> can be quickly swapped with a blank one (otherwise entries would have to be
>> removed one by one for partial deletion).
> Allowing the hash table to be deleted would be an example of another patch.
>
>> Then, introduce ascii_runtime_measurements_<algo>_staged and
>> binary_runtime_measurements_<algo>_staged interfaces to stage and delete
>> the measurements. Use 'echo A > <IMA interface>' and
>> 'echo D > <IMA interface>' to respectively stage and delete the entire
>> measurements list. Use 'echo N > <IMA interface>', with N between 1 and
>> ULONG_MAX - 1, to delete the selected staged portion of the measurements
>> list.
>>
>> The ima_measure_users counter (protected by the ima_measure_mutex mutex)
>> has been introduced to protect access to the measurements list and the
>> staged part. The open method of all the measurement interfaces has been
>> extended to allow only one writer at a time or, in alternative, multiple
>> readers. The write permission is used to stage and delete the measurements,
>> the read permission to read them. Write requires also the CAP_SYS_ADMIN
>> capability.
> Yes, this is part of the initial patch that adds support for staging the
> measurement list.
>
>> Finally, introduce the binary_lists enum and make binary_runtime_size
>> and ima_num_entries as arrays, to keep track of their values for the
>> current IMA measurements list (BINARY), current list plus staged
>> measurements (BINARY_STAGED) and the cumulative list since IMA
>> initialization (BINARY_FULL).
>>
>> Use BINARY in ima_show_measurements_count(), BINARY_STAGED in
>> ima_add_kexec_buffer() and BINARY_FULL in ima_measure_kexec_event().
>>
>> It should be noted that the BINARY_FULL counter is not passed through
>> kexec. Thus, the number of entries included in the kexec critical data
>> records refers to the entries since the previous kexec records.
>>
>> Note: This code derives from the Alt-IMA Huawei project, whose license is
>> GPL-2.0 OR MIT.
>>
>> Link: https://github.com/linux-integrity/linux/issues/1
>> Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
> The design looks good. As I mentioned above, this patch description is quite
> long, which is an indication that the patch needs to be split up. One method of
> breaking it up would be:
>
> - (Basic) support for staging measurements for deletion (based on a Kconfig)
> - Support for removing the hash table
Great work on the performance improvement and the hash table redesign.
If the following "Trim N" method patch is updated with your current
patch's lock-time performance improvement plus the hash table change,
the "Trim N" method can achieve the same kernel measurement list lock
time as the staged method does, right?
https://lore.kernel.org/linux-integrity/20260205235849.7086-1-chenste@linux.microsoft.com/
> - Support for deleting N measurement records (and pre-pending the remaining
> measurement records)
Is there any problem with merging the work of the "stage" step into the
deletion step?
The "Trim N" method does everything that the "staged" method can do,
right? What can the "stage" method do that the "Trim N" method can't?
In user space, while in the "staged" state, no other user space agent can
access the IMA measurement list, right?
Could you explain the benefit of introducing the "stage" step?
Thanks,
Steven
> - Adding documentation
>
> thanks,
>
> Mimi
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-19 21:31 ` steven chen
@ 2026-03-20 12:41 ` Mimi Zohar
2026-03-20 16:58 ` steven chen
0 siblings, 1 reply; 12+ messages in thread
From: Mimi Zohar @ 2026-03-20 12:41 UTC (permalink / raw)
To: steven chen, Roberto Sassu, corbet, skhan, dmitry.kasatkin,
eric.snowberg, paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, nramas, Roberto Sassu
On Thu, 2026-03-19 at 14:31 -0700, steven chen wrote:
> > - Support for deleting N measurement records (and pre-pending the remaining
> > measurement records)
>
> Is there any problem to bring work of "stage" step together to the
> deletion step?
>
> "Trim N" method does everything that "staged" method can do, right?
> what's the "stage" method can do but "trim N" method can't do?
>
> in user space, if in "staged" state, no other user space agent can
> access the IMA measure list, right?
>
> Could you explain the benefit of bringing the "stage" step?
The performance improvement is because "staging" the IMA measurement list takes
the lock in order to move the measurement list pointer and then releases it.
New measurements can then be appended to a new measurement list. Deleting
records is done without taking the lock to walk the staged measurement list.
Without staging the measurement list, walking the measurement list to trim N
records requires taking and holding the lock. The performance is dependent on
the size of the measurement list.
Your question isn't really about "staging" the measurement list records, but
requiring a userspace signal to delete them. To answer that question, deleting
N records (third patch) could imply staging all the measurement records and
immediately deleting N records without an explicit userspace signal.
I expect the requested "documentation" patch will provide the motivation for the
delayed deletion of the measurement list.
Mimi
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-20 12:41 ` Mimi Zohar
@ 2026-03-20 16:58 ` steven chen
2026-03-20 17:10 ` Roberto Sassu
0 siblings, 1 reply; 12+ messages in thread
From: steven chen @ 2026-03-20 16:58 UTC (permalink / raw)
To: Mimi Zohar, Roberto Sassu, corbet, skhan, dmitry.kasatkin,
eric.snowberg, paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, nramas, Roberto Sassu, steven chen
On 3/20/2026 5:41 AM, Mimi Zohar wrote:
> On Thu, 2026-03-19 at 14:31 -0700, steven chen wrote:
>
>>> - Support for deleting N measurement records (and pre-pending the remaining
>>> measurement records)
>> Is there any problem to bring work of "stage" step together to the
>> deletion step?
>>
>> "Trim N" method does everything that "staged" method can do, right?
>> what's the "stage" method can do but "trim N" method can't do?
>>
>> in user space, if in "staged" state, no other user space agent can
>> access the IMA measure list, right?
>>
>> Could you explain the benefit of bringing the "stage" step?
> The performance improvement is because "staging" the IMA measurement list takes
> the lock in order to move the measurement list pointer and then releases it.
> New measurements can then be appended to a new measurement list. Deleting
> records is done without taking the lock to walk the staged measurement list.
>
> Without staging the measurement list, walking the measurement list to trim N
> records requires taking and holding the lock. The performance is dependent on
> the size of the measurement list.
>
> Your question isn't really about "staging" the measurement list records, but
> requiring a userspace signal to delete them. To answer that question, deleting
> N records (third patch) could imply staging all the measurement records and
> immediately deleting N records without an explicit userspace signal.
>
> I expect the requested "documentation" patch will provide the motivation for the
> delayed deletion of the measurement list.
>
> Mimi
"Staging" is great for reducing the kernel IMA measurement list locking
time. How about just staging N entries and then deleting the staged list
in one shot?
That would mean merging these two APIs into one:
int ima_queue_stage(void)
int ima_queue_delete_staged(unsigned long req_value)
The kernel lock time will be the same, and the user space lock time will
be reduced.
Thanks,
Steven
>
>
>
>
>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-20 16:58 ` steven chen
@ 2026-03-20 17:10 ` Roberto Sassu
2026-03-20 17:24 ` steven chen
0 siblings, 1 reply; 12+ messages in thread
From: Roberto Sassu @ 2026-03-20 17:10 UTC (permalink / raw)
To: steven chen, Mimi Zohar, corbet, skhan, dmitry.kasatkin,
eric.snowberg, paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, nramas, Roberto Sassu
On Fri, 2026-03-20 at 09:58 -0700, steven chen wrote:
> On 3/20/2026 5:41 AM, Mimi Zohar wrote:
> > On Thu, 2026-03-19 at 14:31 -0700, steven chen wrote:
> >
> > > > - Support for deleting N measurement records (and pre-pending the remaining
> > > > measurement records)
> > > Is there any problem to bring work of "stage" step together to the
> > > deletion step?
> > >
> > > "Trim N" method does everything that "staged" method can do, right?
> > > what's the "stage" method can do but "trim N" method can't do?
> > >
> > > in user space, if in "staged" state, no other user space agent can
> > > access the IMA measure list, right?
> > >
> > > Could you explain the benefit of bringing the "stage" step?
> > The performance improvement is because "staging" the IMA measurement list takes
> > the lock in order to move the measurement list pointer and then releases it.
> > New measurements can then be appended to a new measurement list. Deleting
> > records is done without taking the lock to walk the staged measurement list.
> >
> > Without staging the measurement list, walking the measurement list to trim N
> > records requires taking and holding the lock. The performance is dependent on
> > the size of the measurement list.
> >
> > Your question isn't really about "staging" the measurement list records, but
> > requiring a userspace signal to delete them. To answer that question, deleting
> > N records (third patch) could imply staging all the measurement records and
> > immediately deleting N records without an explicit userspace signal.
> >
> > I expect the requested "documentation" patch will provide the motivation for the
> > delayed deletion of the measurement list.
> >
> > Mimi
>
> "Staging" is great on reducing kernel IMA measurement list locking time.
>
> How about just do "stage N" entries and then delete the staged list in
> one shot?
> It means merge two APIs into one API
> int ima_queue_stage(void)
> int ima_queue_delete_staged(unsigned long req_value)
>
> The kernel lock time will be the same. And user space lock time will be
> reduced.
It is not the same. The walk on the staged list is done without holding
ima_extend_list_mutex.
Roberto
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-20 17:10 ` Roberto Sassu
@ 2026-03-20 17:24 ` steven chen
2026-03-20 17:26 ` Roberto Sassu
0 siblings, 1 reply; 12+ messages in thread
From: steven chen @ 2026-03-20 17:24 UTC (permalink / raw)
To: Roberto Sassu, Mimi Zohar, corbet, skhan, dmitry.kasatkin,
eric.snowberg, paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, nramas, Roberto Sassu, steven chen
On 3/20/2026 10:10 AM, Roberto Sassu wrote:
> On Fri, 2026-03-20 at 09:58 -0700, steven chen wrote:
>> On 3/20/2026 5:41 AM, Mimi Zohar wrote:
>>> On Thu, 2026-03-19 at 14:31 -0700, steven chen wrote:
>>>
>>>>> - Support for deleting N measurement records (and pre-pending the remaining
>>>>> measurement records)
>>>> Is there any problem to bring work of "stage" step together to the
>>>> deletion step?
>>>>
>>>> "Trim N" method does everything that "staged" method can do, right?
>>>> what's the "stage" method can do but "trim N" method can't do?
>>>>
>>>> in user space, if in "staged" state, no other user space agent can
>>>> access the IMA measure list, right?
>>>>
>>>> Could you explain the benefit of bringing the "stage" step?
>>> The performance improvement is because "staging" the IMA measurement list takes
>>> the lock in order to move the measurement list pointer and then releases it.
>>> New measurements can then be appended to a new measurement list. Deleting
>>> records is done without taking the lock to walk the staged measurement list.
>>>
>>> Without staging the measurement list, walking the measurement list to trim N
>>> records requires taking and holding the lock. The performance is dependent on
>>> the size of the measurement list.
>>>
>>> Your question isn't really about "staging" the measurement list records, but
>>> requiring a userspace signal to delete them. To answer that question, deleting
>>> N records (third patch) could imply staging all the measurement records and
>>> immediately deleting N records without an explicit userspace signal.
>>>
>>> I expect the requested "documentation" patch will provide the motivation for the
>>> delayed deletion of the measurement list.
>>>
>>> Mimi
>> "Staging" is great on reducing kernel IMA measurement list locking time.
>>
>> How about just do "stage N" entries and then delete the staged list in
>> one shot?
>> It means merge two APIs into one API
>> int ima_queue_stage(void)
>> int ima_queue_delete_staged(unsigned long req_value)
>>
>> The kernel lock time will be the same. And user space lock time will be
>> reduced.
> It is not the same. The walk on the staged list is done without holding
> ima_extend_list_mutex.
>
> Roberto
Is it possible to merge the work of the two APIs into one API?
int ima_queue_stage(void)
int ima_queue_delete_staged(unsigned long req_value)
Thanks,
Steven
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-20 17:24 ` steven chen
@ 2026-03-20 17:26 ` Roberto Sassu
2026-03-20 17:40 ` steven chen
0 siblings, 1 reply; 12+ messages in thread
From: Roberto Sassu @ 2026-03-20 17:26 UTC (permalink / raw)
To: steven chen, Mimi Zohar, corbet, skhan, dmitry.kasatkin,
eric.snowberg, paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, nramas, Roberto Sassu
On Fri, 2026-03-20 at 10:24 -0700, steven chen wrote:
> On 3/20/2026 10:10 AM, Roberto Sassu wrote:
> > On Fri, 2026-03-20 at 09:58 -0700, steven chen wrote:
> > > On 3/20/2026 5:41 AM, Mimi Zohar wrote:
> > > > On Thu, 2026-03-19 at 14:31 -0700, steven chen wrote:
> > > >
> > > > > > - Support for deleting N measurement records (and pre-pending the remaining
> > > > > > measurement records)
> > > > > Is there any problem to bring work of "stage" step together to the
> > > > > deletion step?
> > > > >
> > > > > "Trim N" method does everything that "staged" method can do, right?
> > > > > what's the "stage" method can do but "trim N" method can't do?
> > > > >
> > > > > in user space, if in "staged" state, no other user space agent can
> > > > > access the IMA measure list, right?
> > > > >
> > > > > Could you explain the benefit of bringing the "stage" step?
> > > > The performance improvement is because "staging" the IMA measurement list takes
> > > > the lock in order to move the measurement list pointer and then releases it.
> > > > New measurements can then be appended to a new measurement list. Deleting
> > > > records is done without taking the lock to walk the staged measurement list.
> > > >
> > > > Without staging the measurement list, walking the measurement list to trim N
> > > > records requires taking and holding the lock. The performance is dependent on
> > > > the size of the measurement list.
> > > >
> > > > Your question isn't really about "staging" the measurement list records, but
> > > > requiring a userspace signal to delete them. To answer that question, deleting
> > > > N records (third patch) could imply staging all the measurement records and
> > > > immediately deleting N records without an explicit userspace signal.
> > > >
> > > > I expect the requested "documentation" patch will provide the motivation for the
> > > > delayed deletion of the measurement list.
> > > >
> > > > Mimi
> > > "Staging" is great on reducing kernel IMA measurement list locking time.
> > >
> > > How about just do "stage N" entries and then delete the staged list in
> > > one shot?
> > > It means merge two APIs into one API
> > > int ima_queue_stage(void)
> > > int ima_queue_delete_staged(unsigned long req_value)
> > >
> > > The kernel lock time will be the same. And user space lock time will be
> > > reduced.
> > It is not the same. The walk on the staged list is done without holding
> > ima_extend_list_mutex.
> >
> > Roberto
>
> Is it possible to merge two APIs work into one API?
> int ima_queue_stage(void)
> int ima_queue_delete_staged(unsigned long req_value)
It will be done transparently for the user. IMA will call both
functions for the same securityfs write.
Roberto
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v3 3/3] ima: Add support for staging measurements for deletion
2026-03-20 17:26 ` Roberto Sassu
@ 2026-03-20 17:40 ` steven chen
0 siblings, 0 replies; 12+ messages in thread
From: steven chen @ 2026-03-20 17:40 UTC (permalink / raw)
To: Roberto Sassu, Mimi Zohar, corbet, skhan, dmitry.kasatkin,
eric.snowberg, paul, jmorris, serge
Cc: linux-doc, linux-kernel, linux-integrity, linux-security-module,
gregorylumen, nramas, Roberto Sassu
On 3/20/2026 10:26 AM, Roberto Sassu wrote:
> On Fri, 2026-03-20 at 10:24 -0700, steven chen wrote:
>> On 3/20/2026 10:10 AM, Roberto Sassu wrote:
>>> On Fri, 2026-03-20 at 09:58 -0700, steven chen wrote:
>>>> On 3/20/2026 5:41 AM, Mimi Zohar wrote:
>>>>> On Thu, 2026-03-19 at 14:31 -0700, steven chen wrote:
>>>>>
>>>>>>> - Support for deleting N measurement records (and pre-pending the remaining
>>>>>>> measurement records)
>>>>>> Is there any problem to bring work of "stage" step together to the
>>>>>> deletion step?
>>>>>>
>>>>>> "Trim N" method does everything that "staged" method can do, right?
>>>>>> what's the "stage" method can do but "trim N" method can't do?
>>>>>>
>>>>>> in user space, if in "staged" state, no other user space agent can
>>>>>> access the IMA measure list, right?
>>>>>>
>>>>>> Could you explain the benefit of bringing the "stage" step?
>>>>> The performance improvement is because "staging" the IMA measurement list takes
>>>>> the lock in order to move the measurement list pointer and then releases it.
>>>>> New measurements can then be appended to a new measurement list. Deleting
>>>>> records is done without taking the lock to walk the staged measurement list.
>>>>>
>>>>> Without staging the measurement list, walking the measurement list to trim N
>>>>> records requires taking and holding the lock. The performance is dependent on
>>>>> the size of the measurement list.
>>>>>
>>>>> Your question isn't really about "staging" the measurement list records, but
>>>>> requiring a userspace signal to delete them. To answer that question, deleting
>>>>> N records (third patch) could imply staging all the measurement records and
>>>>> immediately deleting N records without an explicit userspace signal.
>>>>>
>>>>> I expect the requested "documentation" patch will provide the motivation for the
>>>>> delayed deletion of the measurement list.
>>>>>
>>>>> Mimi
>>>> "Staging" is great on reducing kernel IMA measurement list locking time.
>>>>
>>>> How about just do "stage N" entries and then delete the staged list in
>>>> one shot?
>>>> It means merge two APIs into one API
>>>> int ima_queue_stage(void)
>>>> int ima_queue_delete_staged(unsigned long req_value)
>>>>
>>>> The kernel lock time will be the same. And user space lock time will be
>>>> reduced.
>>> It is not the same. The walk on the staged list is done without holding
>>> ima_extend_list_mutex.
>>>
>>> Roberto
>> Is it possible to merge two APIs work into one API?
>> int ima_queue_stage(void)
>> int ima_queue_delete_staged(unsigned long req_value)
> It will be done transparently for the user. IMA will call both
> functions for the same securityfs write.
>
> Roberto
If the two APIs are merged into one API, it will reduce the user space
measurement list lock time.
Thanks,
Steven
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2026-03-20 17:44 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-11 17:19 [PATCH v3 1/3] ima: Remove ima_h_table structure Roberto Sassu
2026-03-11 17:19 ` [PATCH v3 2/3] ima: Replace static htable queue with dynamically allocated array Roberto Sassu
2026-03-11 17:19 ` [PATCH v3 3/3] ima: Add support for staging measurements for deletion Roberto Sassu
2026-03-17 21:03 ` Mimi Zohar
2026-03-19 21:31 ` steven chen
2026-03-20 12:41 ` Mimi Zohar
2026-03-20 16:58 ` steven chen
2026-03-20 17:10 ` Roberto Sassu
2026-03-20 17:24 ` steven chen
2026-03-20 17:26 ` Roberto Sassu
2026-03-20 17:40 ` steven chen
2026-03-17 19:15 ` [PATCH v3 1/3] ima: Remove ima_h_table structure Mimi Zohar
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox