From: Arun Menon <armenon@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Ani Sinha" <anisinha@redhat.com>,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
"Laurent Vivier" <lvivier@redhat.com>,
"Zhao Liu" <zhao1.liu@intel.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Stefan Berger" <stefanb@linux.vnet.ibm.com>,
marcandre.lureau@redhat.com, "Fabiano Rosas" <farosas@suse.de>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Igor Mammedov" <imammedo@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Yanan Wang" <wangyanan55@huawei.com>,
"Arun Menon" <armenon@redhat.com>
Subject: [RFC v2 6/7] hw/tpm: Add support for VM migration with TPM CRB chunking
Date: Thu, 19 Mar 2026 19:23:15 +0530 [thread overview]
Message-ID: <20260319135316.37412-7-armenon@redhat.com> (raw)
In-Reply-To: <20260319135316.37412-1-armenon@redhat.com>
- Add a subsection in the VMState for TPM CRB containing the newly
introduced command and response buffers, along with a "needed" callback,
so that newer QEMU sends the buffers only when necessary.
- Add hw_compat blocker because the feature is only supported for
machine type 11.0 and higher.
- If the VM has no pending chunked TPM commands in the internal buffers
during a VM migration, or if the machine type does not support the newly
introduced buffers, the "needed" callback returns false (it checks the
hw_compat blocker), and the subsection is not sent to the
destination host.
- Since the original command and response buffers are of type GByteArray,
they are serialized in the pre-save hook before being sent to the
destination host, and then restored into the original buffers using
the post-load hook.
Signed-off-by: Arun Menon <armenon@redhat.com>
---
hw/core/machine.c | 1 +
hw/tpm/tpm_crb.c | 114 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 115 insertions(+)
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 6cf0e2f404..fcd6043c99 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -40,6 +40,7 @@
GlobalProperty hw_compat_10_2[] = {
{ "scsi-block", "migrate-pr", "off" },
+    /* keep older machine types on the old TPM CRB migration wire format */
+    { "tpm-crb", "migrate-buffers", "off" },
};
const size_t hw_compat_10_2_len = G_N_ELEMENTS(hw_compat_10_2);
diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c
index e61c04aee0..9ce342fe8a 100644
--- a/hw/tpm/tpm_crb.c
+++ b/hw/tpm/tpm_crb.c
@@ -33,6 +33,17 @@
#include "trace.h"
#include "qom/object.h"
+/* command and response buffers; part of VM state when migrating */
+typedef struct TPMCRBMigState {
+ uint32_t cmd_size; /* number of valid bytes in cmd_tmp */
+ uint8_t *cmd_tmp; /* scratch copy of the command buffer for migration */
+
+ uint32_t rsp_size; /* number of valid bytes in rsp_tmp */
+ uint8_t *rsp_tmp; /* scratch copy of the response buffer for migration */
+
+ uint32_t rsp_offset; /* saved read offset into the response buffer */
+} TPMCRBMigState;
+
struct CRBState {
DeviceState parent_obj;
@@ -49,6 +60,9 @@ struct CRBState {
bool ppi_enabled;
TPMPPI ppi;
+
+ bool migrate_buffers;
+ TPMCRBMigState mig;
};
typedef struct CRBState CRBState;
@@ -347,18 +361,118 @@ static int tpm_crb_pre_save(void *opaque)
return 0;
}
+/*
+ * Decide whether the "tpm-crb/chunk" subsection must be migrated:
+ * only when the property allows it and at least one of the internal
+ * buffers holds pending data.
+ */
+static bool tpm_crb_chunk_needed(void *opaque)
+{
+    CRBState *s = opaque;
+    bool cmd_pending;
+    bool rsp_pending;
+
+    if (!s->migrate_buffers) {
+        return false;
+    }
+
+    cmd_pending = s->command_buffer && s->command_buffer->len > 0;
+    rsp_pending = s->response_buffer && s->response_buffer->len > 0;
+
+    return cmd_pending || rsp_pending;
+}
+
+/*
+ * Mirror the GByteArray command/response buffers into the plain
+ * pointer/size pairs that the VMState fields serialize.
+ * NOTE: cmd_tmp/rsp_tmp alias the GByteArray data on the save side;
+ * no ownership is taken and nothing must be freed here.
+ */
+static int tpm_crb_chunk_pre_save(void *opaque)
+{
+ CRBState *s = opaque;
+
+ if (s->command_buffer) {
+ s->mig.cmd_size = s->command_buffer->len;
+ s->mig.cmd_tmp = s->command_buffer->data;
+ } else {
+ s->mig.cmd_tmp = NULL;
+ s->mig.cmd_size = 0;
+ }
+
+ if (s->response_buffer) {
+ s->mig.rsp_size = s->response_buffer->len;
+ s->mig.rsp_tmp = s->response_buffer->data;
+ } else {
+ s->mig.rsp_tmp = NULL;
+ s->mig.rsp_size = 0;
+ }
+ /* response_offset width assumed to fit uint32_t — TODO confirm type */
+ s->mig.rsp_offset = (uint32_t)s->response_offset;
+ return 0;
+}
+
+/*
+ * Validate the incoming chunk state and hand the migrated scratch
+ * buffers over to the device's GByteArrays.
+ * On validation failure the VMSTATE-allocated scratch memory is freed
+ * and false is returned with errp set.
+ */
+static bool tpm_crb_chunk_post_load(void *opaque, int version_id, Error **errp)
+{
+    CRBState *s = opaque;
+
+    if (s->mig.cmd_size > s->be_buffer_size ||
+        s->mig.rsp_size > s->be_buffer_size ||
+        s->mig.rsp_offset > s->mig.rsp_size) {
+        error_setg(errp,
+                   "tpm-crb-chunk: incoming buffer %u exceeds limit %zu, "
+                   "or offset %u exceeds size %u",
+                   s->mig.cmd_size, s->be_buffer_size,
+                   s->mig.rsp_offset, s->mig.rsp_size);
+        g_free(s->mig.cmd_tmp);
+        s->mig.cmd_tmp = NULL;
+        g_free(s->mig.rsp_tmp);
+        s->mig.rsp_tmp = NULL;
+        return false;
+    }
+
+    if (s->mig.cmd_tmp) {
+        if (s->command_buffer) {
+            g_byte_array_unref(s->command_buffer);
+        }
+        /* new_take() takes ownership of the VMSTATE-allocated memory */
+        s->command_buffer = g_byte_array_new_take(s->mig.cmd_tmp,
+                                                  s->mig.cmd_size);
+        s->mig.cmd_tmp = NULL;
+    } else if (s->command_buffer) {
+        /* nothing was migrated: make sure no stale data survives */
+        g_byte_array_set_size(s->command_buffer, 0);
+    }
+
+    if (s->mig.rsp_tmp) {
+        if (s->response_buffer) {
+            g_byte_array_unref(s->response_buffer);
+        }
+        s->response_buffer = g_byte_array_new_take(s->mig.rsp_tmp,
+                                                   s->mig.rsp_size);
+        s->mig.rsp_tmp = NULL;
+    } else if (s->response_buffer) {
+        g_byte_array_set_size(s->response_buffer, 0);
+    }
+
+    /*
+     * Restore the read offset saved by tpm_crb_chunk_pre_save(); it was
+     * migrated and range-checked above but must also be written back,
+     * otherwise the destination resumes reading from a stale offset.
+     */
+    s->response_offset = s->mig.rsp_offset;
+
+    return true;
+}
+
+/*
+ * Optional subsection carrying the in-flight chunked command/response
+ * buffers; only sent when tpm_crb_chunk_needed() returns true.
+ * The VBUFFER fields allocate memory on load, which
+ * tpm_crb_chunk_post_load() either hands over to the GByteArrays or
+ * frees on validation failure.
+ */
+static const VMStateDescription vmstate_tpm_crb_chunk = {
+ .name = "tpm-crb/chunk",
+ .version_id = 1,
+ .needed = tpm_crb_chunk_needed,
+ .pre_save = tpm_crb_chunk_pre_save,
+ .post_load_errp = tpm_crb_chunk_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(mig.cmd_size, CRBState),
+ VMSTATE_VBUFFER_ALLOC_UINT32(mig.cmd_tmp, CRBState, 0, NULL,
+ mig.cmd_size),
+ VMSTATE_UINT32(mig.rsp_size, CRBState),
+ VMSTATE_VBUFFER_ALLOC_UINT32(mig.rsp_tmp, CRBState, 0, NULL,
+ mig.rsp_size),
+ VMSTATE_UINT32(mig.rsp_offset, CRBState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_tpm_crb = {
.name = "tpm-crb",
.pre_save = tpm_crb_pre_save,
.fields = (const VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, CRBState, TPM_CRB_R_MAX),
VMSTATE_END_OF_LIST(),
+ },
+ /* chunked-buffer state, sent only when tpm_crb_chunk_needed() is true */
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_tpm_crb_chunk,
+ NULL,
}
};
static const Property tpm_crb_properties[] = {
DEFINE_PROP_TPMBE("tpmdev", CRBState, tpmbe),
DEFINE_PROP_BOOL("ppi", CRBState, ppi_enabled, true),
+ /* gates the tpm-crb/chunk subsection; forced off via hw_compat_10_2 */
+ DEFINE_PROP_BOOL("migrate-buffers", CRBState, migrate_buffers, true),
};
static void tpm_crb_reset(void *dev)
--
2.53.0
next prev parent reply other threads:[~2026-03-19 13:55 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-19 13:53 [RFC v2 0/7] hw/tpm: CRB chunking capability to handle PQC Arun Menon
2026-03-19 13:53 ` [RFC v2 1/7] hw/tpm: Add TPM CRB chunking fields Arun Menon
2026-03-19 13:53 ` [RFC v2 2/7] hw/tpm: Refactor CRB_CTRL_START register access Arun Menon
2026-03-19 13:53 ` [RFC v2 3/7] hw/tpm: Add internal buffer state for chunking Arun Menon
2026-03-26 11:27 ` marcandre.lureau
2026-03-19 13:53 ` [RFC v2 4/7] hw/tpm: Implement TPM CRB chunking logic Arun Menon
2026-03-26 11:27 ` marcandre.lureau
2026-03-19 13:53 ` [RFC v2 5/7] test/qtest: Add test for tpm crb chunking Arun Menon
2026-03-26 11:27 ` marcandre.lureau
2026-03-26 11:32 ` Marc-André Lureau
2026-03-19 13:53 ` Arun Menon [this message]
2026-03-26 11:27 ` [RFC v2 6/7] hw/tpm: Add support for VM migration with TPM CRB chunking marcandre.lureau
2026-03-19 13:53 ` [RFC v2 7/7] hw/tpm: Increase TPM TIS max buffer size to 8192 Arun Menon
2026-03-20 18:57 ` Stefan Berger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260319135316.37412-7-armenon@redhat.com \
--to=armenon@redhat.com \
--cc=anisinha@redhat.com \
--cc=farosas@suse.de \
--cc=imammedo@redhat.com \
--cc=lvivier@redhat.com \
--cc=marcandre.lureau@redhat.com \
--cc=marcel.apfelbaum@gmail.com \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanb@linux.vnet.ibm.com \
--cc=wangyanan55@huawei.com \
--cc=zhao1.liu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox