qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
@ 2013-11-06 14:39 Corey Bryant
  2013-11-19 19:37 ` Stefan Berger
  0 siblings, 1 reply; 11+ messages in thread
From: Corey Bryant @ 2013-11-06 14:39 UTC (permalink / raw)
  To: qemu-devel; +Cc: Corey Bryant, aliguori

This patch provides a software TPM backend implementation. The
core software TPM functionality is provided by the libtpms
library.  With this patch, multiple guests can run with their
own emulated TPMs.

The libtpms repository can be found at:
https://github.com/coreycb/libtpms

Signed-off-by: Corey Bryant <coreyb@linux.vnet.ibm.com>
---
 configure            |   24 ++
 hw/tpm/Makefile.objs |    2 +
 hw/tpm/tpm_libtpms.c |  885 ++++++++++++++++++++++++++++++++++++++++++++++++++
 qemu-options.hx      |   31 ++-
 tpm.c                |    2 +-
 5 files changed, 941 insertions(+), 3 deletions(-)
 create mode 100644 hw/tpm/tpm_libtpms.c

diff --git a/configure b/configure
index 9addff1..085142f 100755
--- a/configure
+++ b/configure
@@ -2475,6 +2475,26 @@ else
 fi
 
 ##########################################
+# TPM libtpms probe
+
+tpm_libtpms=no
+if test "$tpm" != "no" ; then
+cat > $TMPC <<EOF
+#include <libtpms/tpm_library.h>
+#include <libtpms/tpm_error.h>
+#include <libtpms/tpm_memory.h>
+#include <libtpms/tpm_nvfilename.h>
+#include <libtpms/tpm_tis.h>
+int main(int argc, char **argv) { TPMLIB_GetVersion(); return 0; }
+EOF
+  libtpms_libs=`$pkg_config libtpms --libs 2> /dev/null`
+  if compile_prog "$libtpms_libs" ; then
+    tpm_libtpms=$tpm
+    libs_softmmu="$libs_softmmu $libtpms_libs"
+  fi
+fi
+
+##########################################
 # adjust virtio-blk-data-plane based on linux-aio
 
 if test "$virtio_blk_data_plane" = "yes" -a \
@@ -3746,6 +3766,7 @@ echo "gcov enabled      $gcov"
 echo "TPM support       $tpm"
 echo "libssh2 support   $libssh2"
 echo "TPM passthrough   $tpm_passthrough"
+echo "TPM libtpms       $tpm_libtpms"
 echo "QOM debugging     $qom_cast_debug"
 
 if test "$sdl_too_old" = "yes"; then
@@ -4154,6 +4175,9 @@ if test "$tpm" = "yes"; then
   if test "$tpm_passthrough" = "yes"; then
     echo "CONFIG_TPM_PASSTHROUGH=y" >> $config_host_mak
   fi
+  if test "$tpm_libtpms" = "yes"; then
+    echo "CONFIG_TPM_LIBTPMS=y" >> $config_host_mak
+  fi
 fi
 
 # use default implementation for tracing backend-specific routines
diff --git a/hw/tpm/Makefile.objs b/hw/tpm/Makefile.objs
index 99f5983..77e9065 100644
--- a/hw/tpm/Makefile.objs
+++ b/hw/tpm/Makefile.objs
@@ -1,2 +1,4 @@
 common-obj-$(CONFIG_TPM_TIS) += tpm_tis.o
 common-obj-$(CONFIG_TPM_PASSTHROUGH) += tpm_passthrough.o
+common-obj-$(CONFIG_TPM_LIBTPMS) += tpm_libtpms.o
+common-obj-$(CONFIG_TPM_LIBTPMS) += tpm_nvram.o
diff --git a/hw/tpm/tpm_libtpms.c b/hw/tpm/tpm_libtpms.c
new file mode 100644
index 0000000..f9c1f80
--- /dev/null
+++ b/hw/tpm/tpm_libtpms.c
@@ -0,0 +1,885 @@
+/*
+ * libtpms TPM driver
+ *
+ * Copyright (C) 2013 IBM Corporation
+ *
+ * Authors:
+ *  Stefan Berger   <stefanb@us.ibm.com>
+ *  Corey Bryant    <coreyb@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "sysemu/tpm_backend.h"
+#include "tpm_int.h"
+#include "tpm_nvram.h"
+#include "qapi/qmp/qerror.h"
+#include "migration/migration.h"
+#include "sysemu/tpm_backend_int.h"
+
+#include <libtpms/tpm_library.h>
+#include <libtpms/tpm_error.h>
+#include <libtpms/tpm_memory.h>
+#include <libtpms/tpm_nvfilename.h>
+#include <libtpms/tpm_tis.h>
+
+/* #define DEBUG_TPM */
+
+#ifdef DEBUG_TPM
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#define DPRINTF_BUFFER(buffer, len) \
+    do { tpm_ltpms_dump_buffer(stderr, buffer, len); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#define DPRINTF_BUFFER(buffer, len) \
+    do { } while (0)
+#endif
+
+#define NVRAM_BLOB_OFFSET_FROM_ENTRY(entry_offset) \
+    (entry_offset + sizeof(uint32_t))
+
+#define TYPE_TPM_LIBTPMS "tpm-libtpms"
+#define TPM_LIBTPMS(obj) \
+    OBJECT_CHECK(TPMLTPMsState, (obj), TYPE_TPM_LIBTPMS)
+
+static const TPMDriverOps tpm_ltpms_driver;
+
+/* data structures */
+typedef struct TPMLTPMsThreadParams {
+    TPMState *tpm_state;
+
+    TPMRecvDataCB *recv_data_callback;
+    TPMBackend *tb;
+} TPMLTPMsThreadParams;
+
+struct NVRAMEntry {
+    uint32_t cur_size;
+    uint8_t *buffer;
+};
+
+typedef struct NVRAMEntry NVRAMEntry;
+
+struct TPMLTPMsState {
+    TPMBackend parent;
+
+    TPMBackendThread tbt;
+
+    TPMLTPMsThreadParams tpm_thread_params;
+
+    bool tpm_initialized;
+    bool had_fatal_error;
+
+    BlockDriverState *bdrv;
+
+    NVRAMEntry *perm_state_entry;
+    NVRAMEntry *save_state_entry;
+    NVRAMEntry *vola_state_entry;
+
+    uint32_t perm_state_entry_offset;
+    uint32_t save_state_entry_offset;
+    uint32_t vola_state_entry_offset;
+
+    uint32_t perm_state_max_size;
+    uint32_t save_state_max_size;
+    uint32_t vola_state_max_size;
+
+    QemuMutex tpm_initialized_mutex;
+
+    uint8_t locty; /* locality of command being executed by libtpms */
+};
+
+typedef struct TPMLTPMsState TPMLTPMsState;
+
+static TPMBackend *tpm_backend;
+
+/* functions */
+
+#ifdef DEBUG_TPM
+static inline void tpm_ltpms_dump_buffer(FILE *stream, unsigned char *buffer,
+                                         unsigned int len)
+{
+    int i;
+
+    for (i = 0; i < len; i++) {
+        if (i && !(i % 16)) {
+            fprintf(stream, "\n");
+        }
+        fprintf(stream, "%.2X ", buffer[i]);
+    }
+    fprintf(stream, "\n");
+}
+#endif
+
+static inline void tpm_ltpms_free_nvram_entry(NVRAMEntry **entry)
+{
+    if (*entry) {
+        TPM_Free((unsigned char *)*entry);
+        *entry = NULL;
+    }
+}
+
+static inline void tpm_ltpms_free_nvram_buffer(NVRAMEntry *entry)
+{
+    if (entry && entry->buffer) {
+        TPM_Free(entry->buffer);
+        entry->buffer = NULL;
+        entry->cur_size = 0;
+    }
+}
+
+static inline void tpm_ltpms_free_nvram_all(TPMLTPMsState *tpm_ltpms)
+{
+    tpm_ltpms_free_nvram_buffer(tpm_ltpms->perm_state_entry);
+    tpm_ltpms_free_nvram_buffer(tpm_ltpms->save_state_entry);
+    tpm_ltpms_free_nvram_buffer(tpm_ltpms->vola_state_entry);
+
+    tpm_ltpms_free_nvram_entry(&tpm_ltpms->perm_state_entry);
+    tpm_ltpms_free_nvram_entry(&tpm_ltpms->save_state_entry);
+    tpm_ltpms_free_nvram_entry(&tpm_ltpms->vola_state_entry);
+}
+
+/*
+ * Calls into libtpms to get a runtime property of the TPM
+ */
+static int tpmlib_get_prop(enum TPMLIB_TPMProperty prop)
+{
+    int result;
+
+    TPM_RESULT res = TPMLIB_GetTPMProperty(prop, &result);
+    assert(res == TPM_SUCCESS);
+
+    return result;
+}
+
+/*
+ * Generates the drive offsets where NVRAM blobs are stored.  Each offset
+ * allows for enough room to store the current blob size plus a blob of
+ * the maximum size.
+ */
+static void tpm_ltpms_get_nvram_offsets(TPMLTPMsState *tpm_ltpms)
+{
+    tpm_ltpms->perm_state_entry_offset = 0;
+    tpm_ltpms->perm_state_max_size = tpmlib_get_prop(TPMPROP_TPM_MAX_NV_SPACE);
+
+    tpm_ltpms->save_state_entry_offset =
+        ROUND_UP(tpm_ltpms->perm_state_entry_offset + sizeof(uint32_t) +
+                 tpm_ltpms->perm_state_max_size + 1, 1024);
+    tpm_ltpms->save_state_max_size =
+        tpmlib_get_prop(TPMPROP_TPM_MAX_SAVESTATE_SPACE);
+
+    tpm_ltpms->vola_state_entry_offset =
+        ROUND_UP(tpm_ltpms->save_state_entry_offset + sizeof(uint32_t) +
+                 tpm_ltpms->save_state_max_size + 1, 1024);
+    tpm_ltpms->vola_state_max_size =
+        tpmlib_get_prop(TPMPROP_TPM_MAX_VOLATILESTATE_SPACE);
+}
+
+/*
+ * Writes an NVRAM entry and its blob to the specified drive offset
+ */
+static int tpm_ltpms_write_to_nvram(TPMLTPMsState *tpm_ltpms, uint32_t offset,
+                                    NVRAMEntry *entry, uint32_t max_size)
+{
+    int rc;
+    uint8_t *buffer = entry->buffer;
+    uint32_t size = entry->cur_size;
+    BlockDriverState *bdrv = tpm_ltpms->bdrv;
+
+    DPRINTF("tpm_libtpms: Writing NVRAM entry to offset %"PRIu32"\n", offset);
+
+    if (tpm_ltpms->had_fatal_error) {
+        return TPM_FAIL;
+    }
+
+    if (size > max_size) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM blob size too big");
+        return TPM_FAIL;
+    }
+
+    DPRINTF("tpm_libtpms: current blob size = %"PRIu32"\n", size);
+
+    /* Write the blob */
+    if (size > 0) {
+        DPRINTF_BUFFER(buffer, size);
+
+        rc = tpm_nvram_bdrv_write(bdrv, NVRAM_BLOB_OFFSET_FROM_ENTRY(offset),
+                                  buffer, size);
+        if (rc != size) {
+            qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM write failed");
+            return rc;
+        }
+    }
+
+    /* Blob size is stored on disk in big-endian */
+    size = cpu_to_be32(size);
+
+    /* Write the blob size */
+    rc = tpm_nvram_bdrv_write(bdrv, offset, (uint8_t *)&size, sizeof(size));
+    if (rc != sizeof(size)) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM write failed");
+        return rc;
+    }
+
+    return TPM_SUCCESS;
+}
+
+/*
+ * Reads an NVRAM entry and its blob from the specified drive offset
+ */
+static int tpm_ltpms_read_from_nvram(TPMLTPMsState *tpm_ltpms, uint32_t offset,
+                                     NVRAMEntry **entry, uint32_t max_size)
+{
+    int rc;
+    uint8_t *buffer = NULL;
+    uint32_t *size = NULL;
+    BlockDriverState *bdrv = tpm_ltpms->bdrv;
+
+    DPRINTF("tpm_libtpms: Reading NVRAM entry from offset %"PRIu32"\n", offset);
+
+    if (tpm_ltpms->had_fatal_error) {
+        return TPM_FAIL;
+    }
+
+    /* Allocate the in-memory blob entry */
+    rc = TPM_Malloc((unsigned char **)entry, sizeof(**entry));
+    if (rc != TPM_SUCCESS) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                      "TPM memory allocation failed");
+        abort();
+    }
+
+    /* Read the blob size */
+    rc = tpm_nvram_bdrv_read(bdrv, offset, (uint8_t **)&size, sizeof(*size));
+    if (rc != sizeof(*size)) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM read failed");
+        goto err_exit;
+    }
+
+    /* Blob size is stored on disk in big-endian */
+    *size = be32_to_cpu(*size);
+
+    if (*size > max_size) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM blob size too big");
+        rc = TPM_FAIL;
+        goto err_exit;
+    }
+
+    DPRINTF("tpm_libtpms: current blob size = %"PRIu32"\n", *size);
+
+    (*entry)->cur_size = *size;
+    (*entry)->buffer = NULL;
+
+    /* Read the blob */
+    if (*size > 0) {
+        rc = tpm_nvram_bdrv_read(bdrv, NVRAM_BLOB_OFFSET_FROM_ENTRY(offset),
+                                 &buffer, *size);
+        if (rc != *size) {
+            qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM read failed");
+            goto err_exit;
+        }
+
+        (*entry)->buffer = buffer;
+
+        DPRINTF_BUFFER(buffer, *size);
+    }
+
+    rc = TPM_SUCCESS;
+
+err_exit:
+    if (size) {
+        TPM_Free((uint8_t *)size);
+    }
+
+    return rc;
+}
+
+/*
+ * Loads the TPM's NVRAM state from NVRAM drive into memory
+ */
+static int tpm_ltpms_load_tpm_state_from_nvram(TPMLTPMsState *tpm_ltpms)
+{
+    int rc;
+
+    rc = tpm_ltpms_read_from_nvram(tpm_ltpms,
+                                   tpm_ltpms->perm_state_entry_offset,
+                                   &tpm_ltpms->perm_state_entry,
+                                   tpm_ltpms->perm_state_max_size);
+    if (rc) {
+        goto err_exit;
+    }
+
+    rc = tpm_ltpms_read_from_nvram(tpm_ltpms,
+                                   tpm_ltpms->save_state_entry_offset,
+                                   &tpm_ltpms->save_state_entry,
+                                   tpm_ltpms->save_state_max_size);
+    if (rc) {
+        goto err_exit;
+    }
+
+    rc = tpm_ltpms_read_from_nvram(tpm_ltpms,
+                                   tpm_ltpms->vola_state_entry_offset,
+                                   &tpm_ltpms->vola_state_entry,
+                                   tpm_ltpms->vola_state_max_size);
+    if (rc) {
+        goto err_exit;
+    }
+
+    return 0;
+
+err_exit:
+    tpm_ltpms->had_fatal_error = true;
+
+    return rc;
+}
+
+/*
+ * Processes a command request by calling into libtpms, and returns
+ * result to front end
+ */
+static void tpm_ltpms_process_request(TPMLTPMsState *tpm_ltpms,
+                                      TPMLTPMsThreadParams *thr_parms)
+{
+    TPM_RESULT res;
+    uint32_t in_len, out_len;
+    uint8_t *in, *out;
+    uint32_t resp_size;
+    TPMLocality *locty_data;
+
+    DPRINTF("tpm_libtpms: processing command\n");
+
+    tpm_ltpms->locty = thr_parms->tpm_state->locty_number;
+
+    locty_data = thr_parms->tpm_state->locty_data;
+
+    in      = locty_data->w_buffer.buffer;
+    in_len  = locty_data->w_offset;
+    out     = locty_data->r_buffer.buffer;
+    out_len = locty_data->r_buffer.size;
+
+    if (tpm_ltpms->tpm_initialized) {
+        DPRINTF("tpm_libtpms: received %d bytes from VM in locality %d\n",
+                in_len, tpm_ltpms->locty);
+        DPRINTF_BUFFER(in, in_len);
+
+        resp_size = 0;
+
+        res = TPMLIB_Process(&out, &resp_size, &out_len, in, in_len);
+        if (res == TPM_SUCCESS) {
+            goto send_response;
+        }
+        qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                      "TPM libtpms command processing failed");
+    } else {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                      "TPM libtpms not initialized");
+    }
+
+    resp_size = tpm_write_fatal_error_response(out, out_len);
+
+send_response:
+    DPRINTF("tpm_libtpms: sending %d bytes to TPM front-end\n", resp_size);
+    DPRINTF_BUFFER(out, resp_size);
+
+    thr_parms->recv_data_callback(thr_parms->tpm_state, tpm_ltpms->locty);
+
+    return;
+}
+
+static void tpm_ltpms_worker_thread(gpointer data, gpointer user_data)
+{
+    TPM_RESULT res;
+    TPMLTPMsThreadParams *thr_parms = user_data;
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(thr_parms->tb);
+    TPMBackendCmd cmd = (TPMBackendCmd)data;
+
+    tpm_backend = thr_parms->tb;
+
+    DPRINTF("tpm_libtpms: processing command type %d\n", cmd);
+
+    switch (cmd) {
+    case TPM_BACKEND_CMD_TPM_RESET:
+        if (tpm_ltpms->tpm_initialized) {
+            qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
+            tpm_ltpms->tpm_initialized = false;
+            qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
+
+            TPMLIB_Terminate();
+        }
+        /* fall through */
+    case TPM_BACKEND_CMD_INIT:
+        res = TPMLIB_MainInit();
+        if (res == TPM_SUCCESS) {
+            qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
+            tpm_ltpms->tpm_initialized = true;
+            qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
+        } else {
+            qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                          "TPM libtpms initialization failed");
+            abort();
+        }
+        break;
+    case TPM_BACKEND_CMD_PROCESS_CMD:
+        tpm_ltpms_process_request(tpm_ltpms, thr_parms);
+        break;
+    case TPM_BACKEND_CMD_END:
+        if (tpm_ltpms->tpm_initialized) {
+            qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
+            tpm_ltpms->tpm_initialized = false;
+            qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
+
+            TPMLIB_Terminate();
+            tpm_ltpms_free_nvram_all(tpm_ltpms);
+        }
+        break;
+    }
+}
+
+/*****************************************************************
+ * libtpms TPM library callbacks
+ ****************************************************************/
+
+/*
+ * Called by libtpms before any access to persistent storage is done
+ */
+static TPM_RESULT tpm_ltpms_nvram_init(void)
+{
+    int rc;
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
+
+    tpm_ltpms->bdrv = bdrv_find(tpm_backend->nvram_id);
+    if (!tpm_ltpms->bdrv) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM 'nvram' drive not found");
+        abort();
+    }
+
+    rc = tpm_nvram_bdrv_init(tpm_ltpms->bdrv);
+    if (rc) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM drive init failed");
+        abort();
+    }
+
+    tpm_ltpms_get_nvram_offsets(tpm_ltpms);
+
+    rc = tpm_ltpms_load_tpm_state_from_nvram(tpm_ltpms);
+    if (rc) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM load state failed");
+        abort();
+    }
+
+    return TPM_SUCCESS;
+}
+
+/*
+ * Called by libtpms when the TPM wants to load state from persistent
+ * storage
+ */
+static TPM_RESULT tpm_ltpms_nvram_loaddata(unsigned char **data,
+                                           uint32_t *length,
+                                           uint32_t tpm_number,
+                                           const char *name)
+{
+    TPM_RESULT rc = TPM_SUCCESS;
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
+    NVRAMEntry **entry = NULL;
+
+    DPRINTF("tpm_libtpms: Loading NVRAM state '%s' from storage\n", name);
+
+    if (tpm_ltpms->had_fatal_error) {
+        return TPM_FAIL;
+    }
+
+    *length = 0;
+
+    if (!strcmp(name, TPM_PERMANENT_ALL_NAME)) {
+        entry = &tpm_ltpms->perm_state_entry;
+    } else if (!strcmp(name, TPM_SAVESTATE_NAME)) {
+        entry = &tpm_ltpms->save_state_entry;
+    } else if (!strcmp(name, TPM_VOLATILESTATE_NAME)) {
+        entry = &tpm_ltpms->vola_state_entry;
+    }
+
+    /* In-memory entries are allocated for the life of the backend */
+    assert(entry != NULL);
+
+    *length = (*entry)->cur_size;
+    if (*length > 0) {
+        rc = TPM_Malloc(data, *length);
+        if (rc == TPM_SUCCESS) {
+            memcpy(*data, (*entry)->buffer, *length);
+        } else {
+            qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                          "TPM memory allocation failed");
+            abort();
+        }
+    }
+
+    if (*length == 0) {
+        rc = TPM_RETRY;
+    }
+
+    DPRINTF("tpm_libtpms: Read %"PRIu32" bytes from storage\n", *length);
+
+    return rc;
+}
+
+/*
+ * Called by libtpms when the TPM wants to store state to persistent
+ * storage
+ */
+static TPM_RESULT tpm_ltpms_nvram_storedata(const unsigned char *data,
+                                            uint32_t length,
+                                            uint32_t tpm_number,
+                                            const char *name)
+{
+    TPM_RESULT rc = TPM_SUCCESS;
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
+    NVRAMEntry *entry = NULL;
+    uint32_t offset = 0, max_size = 0;
+
+    DPRINTF("tpm_libtpms: Storing NVRAM state '%s' to storage\n", name);
+
+    if (tpm_ltpms->had_fatal_error) {
+        return TPM_FAIL;
+    }
+
+    if (!strcmp(name, TPM_PERMANENT_ALL_NAME)) {
+        entry = tpm_ltpms->perm_state_entry;
+        offset = tpm_ltpms->perm_state_entry_offset;
+        max_size = tpm_ltpms->perm_state_max_size;
+    } else if (!strcmp(name, TPM_SAVESTATE_NAME)) {
+        entry = tpm_ltpms->save_state_entry;
+        offset = tpm_ltpms->save_state_entry_offset;
+        max_size = tpm_ltpms->save_state_max_size;
+    } else if (!strcmp(name, TPM_VOLATILESTATE_NAME)) {
+        entry = tpm_ltpms->vola_state_entry;
+        offset = tpm_ltpms->vola_state_entry_offset;
+        max_size = tpm_ltpms->vola_state_max_size;
+    }
+
+    /* In-memory entries are allocated for the life of the backend */
+    assert(entry != NULL);
+
+    if (length > 0) {
+        rc = TPM_Realloc(&entry->buffer, length);
+        if (rc != TPM_SUCCESS) {
+            qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                          "TPM memory allocation failed");
+            abort();
+        }
+        memcpy(entry->buffer, data, length);
+        entry->cur_size = length;
+    } else {
+        tpm_ltpms_free_nvram_buffer(entry);
+    }
+
+    if (tpm_ltpms_write_to_nvram(tpm_ltpms, offset, entry, max_size)) {
+        goto err_exit;
+    }
+
+    DPRINTF("tpm_libtpms: Wrote %"PRIu32" bytes to storage\n", length);
+
+    return rc;
+
+err_exit:
+    tpm_ltpms->had_fatal_error = true;
+
+    return TPM_FAIL;
+}
+
+/*
+ * Called by libtpms when the TPM wants to delete state from persistent
+ * storage
+ */
+static TPM_RESULT tpm_ltpms_nvram_deletename(uint32_t tpm_number,
+                                             const char *name,
+                                             TPM_BOOL mustExist)
+{
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
+    NVRAMEntry *entry = NULL;
+    uint32_t offset = 0, max_size = 0;
+
+    DPRINTF("tpm_libtpms: Deleting NVRAM state '%s' from storage\n", name);
+
+    if (tpm_ltpms->had_fatal_error) {
+        return TPM_FAIL;
+    }
+
+    if (!strcmp(name, TPM_PERMANENT_ALL_NAME)) {
+        entry = tpm_ltpms->perm_state_entry;
+        offset = tpm_ltpms->perm_state_entry_offset;
+        max_size = tpm_ltpms->perm_state_max_size;
+    } else if (!strcmp(name, TPM_SAVESTATE_NAME)) {
+        entry = tpm_ltpms->save_state_entry;
+        offset = tpm_ltpms->save_state_entry_offset;
+        max_size = tpm_ltpms->save_state_max_size;
+    } else if (!strcmp(name, TPM_VOLATILESTATE_NAME)) {
+        entry = tpm_ltpms->vola_state_entry;
+        offset = tpm_ltpms->vola_state_entry_offset;
+        max_size = tpm_ltpms->vola_state_max_size;
+    }
+
+    if (entry) {
+        tpm_ltpms_free_nvram_buffer(entry);
+
+        if (tpm_ltpms_write_to_nvram(tpm_ltpms, offset, entry, max_size)) {
+            goto err_exit;
+        }
+    }
+
+    DPRINTF("tpm_libtpms: Deleted NVRAM state '%s' from storage\n", name);
+
+    return TPM_SUCCESS;
+
+err_exit:
+    tpm_ltpms->had_fatal_error = true;
+
+    return TPM_FAIL;
+}
+
+/*
+ * Called by libtpms to initialize the I/O subsystem of the TPM
+ */
+static TPM_RESULT tpm_ltpms_io_init(void)
+{
+    return TPM_SUCCESS;
+}
+
+/*
+ * Called by libtpms when the TPM needs to determine the locality under
+ * which a command is supposed to be executed
+ */
+static TPM_RESULT tpm_ltpms_io_getlocality(TPM_MODIFIER_INDICATOR *
+                                           localityModifier,
+                                           uint32_t tpm_number)
+{
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
+
+    *localityModifier = (TPM_MODIFIER_INDICATOR)tpm_ltpms->locty;
+
+    return TPM_SUCCESS;
+}
+
+/*
+ * Called by libtpms when the TPM needs to determine whether physical
+ * presence has been asserted
+ */
+static TPM_RESULT tpm_ltpms_io_getphysicalpresence(TPM_BOOL *physicalPresence,
+                                                   uint32_t tpm_number)
+{
+    *physicalPresence = FALSE;
+
+    return TPM_SUCCESS;
+}
+
+struct libtpms_callbacks callbacks = {
+    .sizeOfStruct               = sizeof(struct libtpms_callbacks),
+    .tpm_nvram_init             = tpm_ltpms_nvram_init,
+    .tpm_nvram_loaddata         = tpm_ltpms_nvram_loaddata,
+    .tpm_nvram_storedata        = tpm_ltpms_nvram_storedata,
+    .tpm_nvram_deletename       = tpm_ltpms_nvram_deletename,
+    .tpm_io_init                = tpm_ltpms_io_init,
+    .tpm_io_getlocality         = tpm_ltpms_io_getlocality,
+    .tpm_io_getphysicalpresence = tpm_ltpms_io_getphysicalpresence,
+};
+
+/*****************************************************************/
+
+/*
+ * Start the TPM (thread).  If it had been started before, then terminate
+ * and start it again.
+ */
+static int tpm_ltpms_startup_tpm(TPMBackend *tb)
+{
+    struct TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
+
+    /* 'power-reset' a running TPM; if none is running start one */
+    tpm_backend_thread_tpm_reset(&tpm_ltpms->tbt, tpm_ltpms_worker_thread,
+                                 &tpm_ltpms->tpm_thread_params);
+
+    return 0;
+}
+
+static void tpm_ltpms_terminate_tpm_thread(TPMBackend *tb)
+{
+    struct TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
+
+    tpm_backend_thread_end(&tpm_ltpms->tbt);
+}
+
+static void tpm_ltpms_reset(TPMBackend *tb)
+{
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
+
+    DPRINTF("tpm_libtpms: Resetting TPM libtpms backend\n");
+
+    tpm_ltpms_terminate_tpm_thread(tb);
+
+    tpm_ltpms->had_fatal_error = false;
+}
+
+static int tpm_ltpms_init(TPMBackend *tb, TPMState *s,
+                          TPMRecvDataCB *recv_data_cb)
+{
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
+
+    if (TPMLIB_RegisterCallbacks(&callbacks) != TPM_SUCCESS) {
+        qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                      "TPM libtpms callback registration failed");
+        return -1;
+    }
+
+    tpm_ltpms->tpm_thread_params.tpm_state = s;
+    tpm_ltpms->tpm_thread_params.recv_data_callback = recv_data_cb;
+    tpm_ltpms->tpm_thread_params.tb = tb;
+
+    qemu_mutex_init(&tpm_ltpms->tpm_initialized_mutex);
+
+    return 0;
+}
+
+static bool tpm_ltpms_get_tpm_established_flag(TPMBackend *tb)
+{
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
+    TPM_BOOL tpmEstablished = false;
+
+    qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
+    if (tpm_ltpms->tpm_initialized) {
+        TPM_IO_TpmEstablished_Get(&tpmEstablished);
+    }
+    qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
+
+    return tpmEstablished;
+}
+
+static bool tpm_ltpms_get_startup_error(TPMBackend *tb)
+{
+    return false;
+}
+
+static size_t tpm_ltpms_realloc_buffer(TPMSizedBuffer *sb)
+{
+    size_t wanted_size = tpmlib_get_prop(TPMPROP_TPM_BUFFER_MAX);
+
+    if (sb->size != wanted_size) {
+        TPM_RESULT res = TPM_Realloc(&sb->buffer, wanted_size);
+        if (res == TPM_SUCCESS) {
+            sb->size = wanted_size;
+        } else {
+            qerror_report(ERROR_CLASS_GENERIC_ERROR,
+                          "TPM memory allocation failed");
+            abort();
+        }
+    }
+    return sb->size;
+}
+
+static void tpm_ltpms_deliver_request(TPMBackend *tb)
+{
+    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
+
+    tpm_backend_thread_deliver_request(&tpm_ltpms->tbt);
+}
+
+static void tpm_ltpms_cancel_cmd(TPMBackend *be)
+{
+}
+
+static const char *tpm_ltpms_create_desc(void)
+{
+    return "libtpms TPM backend driver";
+}
+
+static TPMBackend *tpm_ltpms_create(QemuOpts *opts, const char *id)
+{
+    Object *obj = object_new(TYPE_TPM_LIBTPMS);
+    TPMBackend *tb = TPM_BACKEND(obj);
+    const char *value;
+
+    tb->id = g_strdup(id);
+    tb->fe_model = -1;
+    tb->ops = &tpm_ltpms_driver;
+
+    value = qemu_opt_get(opts, "nvram");
+    if (!value) {
+        qerror_report(QERR_MISSING_PARAMETER, "nvram");
+        goto err_exit;
+    }
+    tb->nvram_id = g_strdup(value);
+
+    return tb;
+
+err_exit:
+    g_free(tb->id);
+
+    return NULL;
+}
+
+static void tpm_ltpms_destroy(TPMBackend *tb)
+{
+    tpm_ltpms_terminate_tpm_thread(tb);
+
+    g_free(tb->id);
+    g_free(tb->nvram_id);
+}
+
+static const QemuOptDesc tpm_ltpms_cmdline_opts[] = {
+    TPM_STANDARD_CMDLINE_OPTS,
+    {
+        .name = "nvram",
+        .type = QEMU_OPT_STRING,
+        .help = "NVRAM drive id",
+    },
+    { /* end of list */ },
+};
+
+static const TPMDriverOps tpm_ltpms_driver = {
+    .type                     = TPM_TYPE_LIBTPMS,
+    .opts                     = tpm_ltpms_cmdline_opts,
+    .desc                     = tpm_ltpms_create_desc,
+    .create                   = tpm_ltpms_create,
+    .destroy                  = tpm_ltpms_destroy,
+    .init                     = tpm_ltpms_init,
+    .startup_tpm              = tpm_ltpms_startup_tpm,
+    .realloc_buffer           = tpm_ltpms_realloc_buffer,
+    .reset                    = tpm_ltpms_reset,
+    .had_startup_error        = tpm_ltpms_get_startup_error,
+    .deliver_request          = tpm_ltpms_deliver_request,
+    .cancel_cmd               = tpm_ltpms_cancel_cmd,
+    .get_tpm_established_flag = tpm_ltpms_get_tpm_established_flag,
+};
+
+
+static void tpm_ltpms_inst_init(Object *obj)
+{
+}
+
+static void tpm_ltpms_inst_finalize(Object *obj)
+{
+}
+
+static void tpm_ltpms_class_init(ObjectClass *klass, void *data)
+{
+    TPMBackendClass *tbc = TPM_BACKEND_CLASS(klass);
+
+    tbc->ops = &tpm_ltpms_driver;
+}
+
+static const TypeInfo tpm_ltpms_info = {
+    .name = TYPE_TPM_LIBTPMS,
+    .parent = TYPE_TPM_BACKEND,
+    .instance_size = sizeof(TPMLTPMsState),
+    .class_init = tpm_ltpms_class_init,
+    .instance_init = tpm_ltpms_inst_init,
+    .instance_finalize = tpm_ltpms_inst_finalize,
+};
+
+static void tpm_libtpms_register(void)
+{
+    type_register_static(&tpm_ltpms_info);
+    tpm_register_driver(&tpm_ltpms_driver);
+}
+
+type_init(tpm_libtpms_register)
diff --git a/qemu-options.hx b/qemu-options.hx
index 5dc8b75..356978e 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -2251,7 +2251,9 @@ DEF("tpmdev", HAS_ARG, QEMU_OPTION_tpmdev, \
     "-tpmdev passthrough,id=id[,path=path][,cancel-path=path]\n"
     "                use path to provide path to a character device; default is /dev/tpm0\n"
     "                use cancel-path to provide path to TPM's cancel sysfs entry; if\n"
-    "                not provided it will be searched for in /sys/class/misc/tpm?/device\n",
+    "                not provided it will be searched for in /sys/class/misc/tpm?/device\n"
+    "-tpmdev libtpms,id=id,nvram=drive-id\n"
+    "                use nvram to provide the NVRAM drive id\n",
     QEMU_ARCH_ALL)
 STEXI
 
@@ -2261,7 +2263,8 @@ The general form of a TPM device option is:
 @item -tpmdev @var{backend} ,id=@var{id} [,@var{options}]
 @findex -tpmdev
 Backend type must be:
-@option{passthrough}.
+@option{passthrough}, or
+@option{libtpms}.
 
 The specific backend type will determine the applicable options.
 The @code{-tpmdev} option creates the TPM backend and requires a
@@ -2311,6 +2314,30 @@ To create a passthrough TPM use the following two options:
 Note that the @code{-tpmdev} id is @code{tpm0} and is referenced by
 @code{tpmdev=tpm0} in the device option.
 
+@item -tpmdev libtpms, id=@var{id}, nvram=@var{drive-id}
+
+Enable access to the libtpms-based emulated TPM.
+
+@option{nvram} specifies the drive id of the NVRAM drive.
+
+Some notes about using the libtpms-based emulated TPM:
+
+To create a libtpms-based TPM, use the following options:
+@example
+-drive file=<path to image file>,if=none,id=tpm-nvram \
+-tpmdev libtpms,id=tpm0,nvram=tpm-nvram \
+-device tpm-tis,tpmdev=tpm0
+@end example
+
+The @code{drive} option provides the path to the image file where the
+TPM's persistent NVRAM data will be stored. Using the @code{qemu-img} tool,
+such an image can be created with a size of 500K.
+
+Note that the @code{-tpmdev} id is @code{tpm0} and is referenced by
+@code{tpmdev=tpm0} in the @code{-device} option. Similarly, the @code{-drive}
+id @code{tpm-nvram} is referenced by @code{nvram=tpm-nvram} in the
+@code{-tpmdev} option.
+
 @end table
 
 ETEXI
diff --git a/tpm.c b/tpm.c
index 2f4ef52..a08e3fd 100644
--- a/tpm.c
+++ b/tpm.c
@@ -26,7 +26,7 @@ static QLIST_HEAD(, TPMBackend) tpm_backends =
 
 
 #define TPM_MAX_MODELS      1
-#define TPM_MAX_DRIVERS     1
+#define TPM_MAX_DRIVERS     2
 
 static TPMDriverOps const *be_drivers[TPM_MAX_DRIVERS] = {
     NULL,
-- 
1.7.1

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-11-06 14:39 [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend Corey Bryant
@ 2013-11-19 19:37 ` Stefan Berger
  0 siblings, 0 replies; 11+ messages in thread
From: Stefan Berger @ 2013-11-19 19:37 UTC (permalink / raw)
  To: Corey Bryant; +Cc: qemu-devel, aliguori

On 11/06/2013 09:39 AM, Corey Bryant wrote:
> This patch provides a software TPM backend implementation. The
> core software TPM functionality is provided by the libtpms
> library.  With this patch, multiple guests can run with their
> own emulated TPMs.
>
> The libtpms repository can be found at:
> https://github.com/coreycb/libtpms
>
> Signed-off-by: Corey Bryant <coreyb@linux.vnet.ibm.com>
> ---
>   configure            |   24 ++
>   hw/tpm/Makefile.objs |    2 +
>   hw/tpm/tpm_libtpms.c |  885 ++++++++++++++++++++++++++++++++++++++++++++++++++
>   qemu-options.hx      |   31 ++-
>   tpm.c                |    2 +-
>   5 files changed, 941 insertions(+), 3 deletions(-)
>   create mode 100644 hw/tpm/tpm_libtpms.c
>
> diff --git a/configure b/configure
> index 9addff1..085142f 100755
> --- a/configure
> +++ b/configure
> @@ -2475,6 +2475,26 @@ else
>   fi
>
>   ##########################################
> +# TPM libtpms probe
> +
> +tpm_libtpms=no
> +if test "$tpm" != "no" ; then
> +cat > $TMPC <<EOF
> +#include <libtpms/tpm_library.h>
> +#include <libtpms/tpm_error.h>
> +#include <libtpms/tpm_memory.h>
> +#include <libtpms/tpm_nvfilename.h>
> +#include <libtpms/tpm_tis.h>
> +int main(int argc, char **argv) { TPMLIB_GetVersion(); return 0; }
> +EOF
> +  libtpms_libs=`$pkg_config libtpms --libs 2> /dev/null`
> +  if compile_prog "$libtpms_libs" ; then
> +    tpm_libtpms=$tpm
> +    libs_softmmu="$libs_softmmu $libtpms_libs"
> +  fi
> +fi
> +
> +##########################################
>   # adjust virtio-blk-data-plane based on linux-aio
>
>   if test "$virtio_blk_data_plane" = "yes" -a \
> @@ -3746,6 +3766,7 @@ echo "gcov enabled      $gcov"
>   echo "TPM support       $tpm"
>   echo "libssh2 support   $libssh2"
>   echo "TPM passthrough   $tpm_passthrough"
> +echo "TPM libtpms       $tpm_libtpms"
>   echo "QOM debugging     $qom_cast_debug"
>
>   if test "$sdl_too_old" = "yes"; then
> @@ -4154,6 +4175,9 @@ if test "$tpm" = "yes"; then
>     if test "$tpm_passthrough" = "yes"; then
>       echo "CONFIG_TPM_PASSTHROUGH=y" >> $config_host_mak
>     fi
> +  if test "$tpm_libtpms" = "yes"; then
> +    echo "CONFIG_TPM_LIBTPMS=y" >> $config_host_mak
> +  fi
>   fi
>
>   # use default implementation for tracing backend-specific routines
> diff --git a/hw/tpm/Makefile.objs b/hw/tpm/Makefile.objs
> index 99f5983..77e9065 100644
> --- a/hw/tpm/Makefile.objs
> +++ b/hw/tpm/Makefile.objs
> @@ -1,2 +1,4 @@
>   common-obj-$(CONFIG_TPM_TIS) += tpm_tis.o
>   common-obj-$(CONFIG_TPM_PASSTHROUGH) += tpm_passthrough.o
> +common-obj-$(CONFIG_TPM_LIBTPMS) += tpm_libtpms.o
> +common-obj-$(CONFIG_TPM_LIBTPMS) += tpm_nvram.o
> diff --git a/hw/tpm/tpm_libtpms.c b/hw/tpm/tpm_libtpms.c
> new file mode 100644
> index 0000000..f9c1f80
> --- /dev/null
> +++ b/hw/tpm/tpm_libtpms.c
> @@ -0,0 +1,885 @@
> +/*
> + * libtpms TPM driver
> + *
> + * Copyright (C) 2013 IBM Corporation
> + *
> + * Authors:
> + *  Stefan Berger   <stefanb@us.ibm.com>
> + *  Corey Bryant    <coreyb@linux.vnet.ibm.com>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or later.
> + * See the COPYING file in the top-level directory.
> + */
> +
> +#include "sysemu/tpm_backend.h"
> +#include "tpm_int.h"
> +#include "tpm_nvram.h"
> +#include "qapi/qmp/qerror.h"
> +#include "migration/migration.h"
> +#include "sysemu/tpm_backend_int.h"
> +
> +#include <libtpms/tpm_library.h>
> +#include <libtpms/tpm_error.h>
> +#include <libtpms/tpm_memory.h>
> +#include <libtpms/tpm_nvfilename.h>
> +#include <libtpms/tpm_tis.h>
> +
> +/* #define DEBUG_TPM */
> +
> +#ifdef DEBUG_TPM
> +#define DPRINTF(fmt, ...) \
> +    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
> +#define DPRINTF_BUFFER(buffer, len) \
> +    do { tpm_ltpms_dump_buffer(stderr, buffer, len); } while (0)
> +#else
> +#define DPRINTF(fmt, ...) \
> +    do { } while (0)
> +#define DPRINTF_BUFFER(buffer, len) \
> +    do { } while (0)
> +#endif
> +
> +#define NVRAM_BLOB_OFFSET_FROM_ENTRY(entry_offset) \
> +    (entry_offset + sizeof(uint32_t))
> +
> +#define TYPE_TPM_LIBTPMS "tpm-libtpms"
> +#define TPM_LIBTPMS(obj) \
> +    OBJECT_CHECK(TPMLTPMsState, (obj), TYPE_TPM_LIBTPMS)
> +
> +static const TPMDriverOps tpm_ltpms_driver;
> +
> +/* data structures */
> +typedef struct TPMLTPMsThreadParams {
> +    TPMState *tpm_state;
> +
> +    TPMRecvDataCB *recv_data_callback;
> +    TPMBackend *tb;
> +} TPMLTPMsThreadParams;
> +
> +struct NVRAMEntry {
> +    uint32_t cur_size;
> +    uint8_t *buffer;
> +};
> +
> +typedef struct NVRAMEntry NVRAMEntry;
> +
> +struct TPMLTPMsState {
> +    TPMBackend parent;
> +
> +    TPMBackendThread tbt;
> +
> +    TPMLTPMsThreadParams tpm_thread_params;
> +
> +    bool tpm_initialized;
> +    bool had_fatal_error;
> +
> +    BlockDriverState *bdrv;
> +
> +    NVRAMEntry *perm_state_entry;
> +    NVRAMEntry *save_state_entry;
> +    NVRAMEntry *vola_state_entry;
> +
> +    uint32_t perm_state_entry_offset;
> +    uint32_t save_state_entry_offset;
> +    uint32_t vola_state_entry_offset;
> +
> +    uint32_t perm_state_max_size;
> +    uint32_t save_state_max_size;
> +    uint32_t vola_state_max_size;
> +
> +    QemuMutex tpm_initialized_mutex;
> +
> +    uint8_t locty; /* locality of command being executed by libtpms */
> +};
> +
> +typedef struct TPMLTPMsState TPMLTPMsState;
> +
> +static TPMBackend *tpm_backend;
> +
> +/* functions */
> +
> +#ifdef DEBUG_TPM
> +static inline void tpm_ltpms_dump_buffer(FILE *stream, unsigned char *buffer,
> +                                         unsigned int len)
> +{
> +    int i;
> +
> +    for (i = 0; i < len; i++) {
> +        if (i && !(i % 16)) {
> +            fprintf(stream, "\n");
> +        }
> +        fprintf(stream, "%.2X ", buffer[i]);
> +    }
> +    fprintf(stream, "\n");
> +}
> +#endif
> +
> +static inline void tpm_ltpms_free_nvram_entry(NVRAMEntry **entry)
> +{
> +    if (*entry) {
> +        TPM_Free((unsigned char *)*entry);
> +        *entry = NULL;
> +    }
> +}
> +
> +static inline void tpm_ltpms_free_nvram_buffer(NVRAMEntry *entry)
> +{
> +    if (entry && entry->buffer) {
> +        TPM_Free(entry->buffer);
> +        entry->buffer = NULL;
> +        entry->cur_size = 0;
> +    }
> +}
> +
> +static inline void tpm_ltpms_free_nvram_all(TPMLTPMsState *tpm_ltpms)
> +{
> +    tpm_ltpms_free_nvram_buffer(tpm_ltpms->perm_state_entry);
> +    tpm_ltpms_free_nvram_buffer(tpm_ltpms->save_state_entry);
> +    tpm_ltpms_free_nvram_buffer(tpm_ltpms->vola_state_entry);
> +
> +    tpm_ltpms_free_nvram_entry(&tpm_ltpms->perm_state_entry);
> +    tpm_ltpms_free_nvram_entry(&tpm_ltpms->save_state_entry);
> +    tpm_ltpms_free_nvram_entry(&tpm_ltpms->vola_state_entry);
> +}
> +
> +/*
> + * Calls into libtpms to get a runtime property of the TPM
> + */
> +static int tpmlib_get_prop(enum TPMLIB_TPMProperty prop)
> +{
> +    int result;
> +
> +    TPM_RESULT res = TPMLIB_GetTPMProperty(prop, &result);
> +    assert(res == TPM_SUCCESS);
> +
> +    return result;
> +}
> +
> +/*
> + * Generates the drive offsets where NVRAM blobs are stored.  Each offset
> + * allows for enough room to store the current blob size plus a blob of
> + * the maximum size.
> + */
> +static void tpm_ltpms_get_nvram_offsets(TPMLTPMsState *tpm_ltpms)
> +{
> +    tpm_ltpms->perm_state_entry_offset = 0;
> +    tpm_ltpms->perm_state_max_size = tpmlib_get_prop(TPMPROP_TPM_MAX_NV_SPACE);
> +
> +    tpm_ltpms->save_state_entry_offset =
> +        ROUND_UP(tpm_ltpms->perm_state_entry_offset + sizeof(uint32_t) +
> +                 tpm_ltpms->perm_state_max_size + 1, 1024);
> +    tpm_ltpms->save_state_max_size =
> +        tpmlib_get_prop(TPMPROP_TPM_MAX_SAVESTATE_SPACE);
> +
> +    tpm_ltpms->vola_state_entry_offset =
> +        ROUND_UP(tpm_ltpms->save_state_entry_offset + sizeof(uint32_t) +
> +                 tpm_ltpms->save_state_max_size + 1, 1024);
> +    tpm_ltpms->vola_state_max_size =
> +        tpmlib_get_prop(TPMPROP_TPM_MAX_VOLATILESTATE_SPACE);
> +}
> +
> +/*
> + * Writes an NVRAM entry and its blob to the specified drive offset
> + */
> +static int tpm_ltpms_write_to_nvram(TPMLTPMsState *tpm_ltpms, uint32_t offset,
> +                                    NVRAMEntry *entry, uint32_t max_size)
> +{
> +    int rc;
> +    uint8_t *buffer = entry->buffer;
> +    uint32_t size = entry->cur_size;
> +    BlockDriverState *bdrv = tpm_ltpms->bdrv;
> +
> +    DPRINTF("tpm_libtpms: Writing NVRAM entry to offset %"PRIu32"\n", offset);
> +
> +    if (tpm_ltpms->had_fatal_error) {
> +        return TPM_FAIL;
> +    }
> +
> +    if (size > max_size) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM blob size too big");
> +        return TPM_FAIL;
> +    }
> +
> +    DPRINTF("tpm_libtpms: current blob size = %"PRIu32"\n", size);
> +
> +    /* Write the blob */
> +    if (size > 0) {
> +        DPRINTF_BUFFER(buffer, size);
> +
> +        rc = tpm_nvram_bdrv_write(bdrv, NVRAM_BLOB_OFFSET_FROM_ENTRY(offset),
> +                                  buffer, size);
> +        if (rc != size) {
> +            qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM write failed");
> +            return rc;
> +        }
> +    }
> +
> +    /* Blob size is stored on disk in big-endian */
> +    size = cpu_to_be32(size);
> +
> +    /* Write the blob size */
> +    rc = tpm_nvram_bdrv_write(bdrv, offset, (uint8_t *)&size, sizeof(size));
> +    if (rc != sizeof(size)) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM write failed");
> +        return rc;
> +    }
> +
> +    return TPM_SUCCESS;
> +}
> +
> +/*
> + * Reads an NVRAM entry and its blob from the specified drive offset
> + */
> +static int tpm_ltpms_read_from_nvram(TPMLTPMsState *tpm_ltpms, uint32_t offset,
> +                                     NVRAMEntry **entry, uint32_t max_size)
> +{
> +    int rc;
> +    uint8_t *buffer = NULL;
> +    uint32_t *size = NULL;
> +    BlockDriverState *bdrv = tpm_ltpms->bdrv;
> +
> +    DPRINTF("tpm_libtpms: Reading NVRAM entry from offset %"PRIu32"\n", offset);
> +
> +    if (tpm_ltpms->had_fatal_error) {
> +        return TPM_FAIL;
> +    }
> +
> +    /* Allocate the in-memory blob entry */
> +    rc = TPM_Malloc((unsigned char **)entry, sizeof(**entry));
> +    if (rc != TPM_SUCCESS) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                      "TPM memory allocation failed");
> +        abort();
> +    }
> +
> +    /* Read the blob size */
> +    rc = tpm_nvram_bdrv_read(bdrv, offset, (uint8_t **)&size, sizeof(*size));
> +    if (rc != sizeof(*size)) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM read failed");
> +        goto err_exit;
> +    }
> +
> +    /* Blob size is stored on disk in big-endian */
> +    *size = be32_to_cpu(*size);
> +
> +    if (*size > max_size) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM blob size too big");
> +        rc = TPM_FAIL;
> +        goto err_exit;
> +    }
> +
> +    DPRINTF("tpm_libtpms: current blob size = %"PRIu32"\n", *size);
> +
> +    (*entry)->cur_size = *size;
> +    (*entry)->buffer = NULL;
> +
> +    /* Read the blob */
> +    if (*size > 0) {
> +        rc = tpm_nvram_bdrv_read(bdrv, NVRAM_BLOB_OFFSET_FROM_ENTRY(offset),
> +                                 &buffer, *size);
> +        if (rc != *size) {
> +            qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM read failed");
> +            goto err_exit;
> +        }
> +
> +        (*entry)->buffer = buffer;
> +
> +        DPRINTF_BUFFER(buffer, *size);
> +    }
> +
> +    rc = TPM_SUCCESS;
> +
> +err_exit:
> +    if (size) {

The check is not necessary.

> +        TPM_Free((uint8_t *)size);
> +    }
> +
> +    return rc;
> +}
> +
> +/*
> + * Loads the TPM's NVRAM state from NVRAM drive into memory
> + */
> +static int tpm_ltpms_load_tpm_state_from_nvram(TPMLTPMsState *tpm_ltpms)
> +{
> +    int rc;
> +
> +    rc = tpm_ltpms_read_from_nvram(tpm_ltpms,
> +                                   tpm_ltpms->perm_state_entry_offset,
> +                                   &tpm_ltpms->perm_state_entry,
> +                                   tpm_ltpms->perm_state_max_size);
> +    if (rc) {
> +        goto err_exit;
> +    }
> +
> +    rc = tpm_ltpms_read_from_nvram(tpm_ltpms,
> +                                   tpm_ltpms->save_state_entry_offset,
> +                                   &tpm_ltpms->save_state_entry,
> +                                   tpm_ltpms->save_state_max_size);
> +    if (rc) {
> +        goto err_exit;
> +    }
> +
> +    rc = tpm_ltpms_read_from_nvram(tpm_ltpms,
> +                                   tpm_ltpms->vola_state_entry_offset,
> +                                   &tpm_ltpms->vola_state_entry,
> +                                   tpm_ltpms->vola_state_max_size);
> +    if (rc) {
> +        goto err_exit;
> +    }
> +
> +    return 0;
> +
> +err_exit:
> +    tpm_ltpms->had_fatal_error = true;
> +
> +    return rc;
> +}
> +
> +/*
> + * Processes a command request by calling into libtpms, and returns
> + * result to front end
> + */
> +static void tpm_ltpms_process_request(TPMLTPMsState *tpm_ltpms,
> +                                      TPMLTPMsThreadParams *thr_parms)
> +{
> +    TPM_RESULT res;
> +    uint32_t in_len, out_len;
> +    uint8_t *in, *out;
> +    uint32_t resp_size;
> +    TPMLocality *locty_data;
> +
> +    DPRINTF("tpm_libtpms: processing command\n");
> +
> +    tpm_ltpms->locty = thr_parms->tpm_state->locty_number;
> +
> +    locty_data = thr_parms->tpm_state->locty_data;
> +
> +    in      = locty_data->w_buffer.buffer;
> +    in_len  = locty_data->w_offset;
> +    out     = locty_data->r_buffer.buffer;
> +    out_len = locty_data->r_buffer.size;
> +

You would have to grab the initialized mutex here ...

> +    if (tpm_ltpms->tpm_initialized) {
> +        DPRINTF("tpm_libtpms: received %d bytes from VM in locality %d\n",
> +                in_len, tpm_ltpms->locty);
> +        DPRINTF_BUFFER(in, in_len);
> +
> +        resp_size = 0;
> +
> +        res = TPMLIB_Process(&out, &resp_size, &out_len, in, in_len);
> +        if (res == TPM_SUCCESS) {
> +            goto send_response;
> +        }
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                      "TPM libtpms command processing failed");
> +    } else {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                      "TPM libtpms not initialized");
> +    }
> +

... and release it here to protect the TPMLIB_Process call as well.

> +    resp_size = tpm_write_fatal_error_response(out, out_len);
> +
> +send_response:
> +    DPRINTF("tpm_libtpms: sending %d bytes to TPM front-end\n", resp_size);
> +    DPRINTF_BUFFER(out, resp_size);
> +
> +    thr_parms->recv_data_callback(thr_parms->tpm_state, tpm_ltpms->locty);
> +
> +    return;
> +}
> +
> +static void tpm_ltpms_worker_thread(gpointer data, gpointer user_data)
> +{
> +    TPM_RESULT res;
> +    TPMLTPMsThreadParams *thr_parms = user_data;
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(thr_parms->tb);
> +    TPMBackendCmd cmd = (TPMBackendCmd)data;
> +
> +    tpm_backend = thr_parms->tb;
> +
> +    DPRINTF("tpm_libtpms: processing command type %d\n", cmd);
> +
> +    switch (cmd) {
> +    case TPM_BACKEND_CMD_TPM_RESET:

To be consistent, you should probably move the locking to before this check here ...


> +        if (tpm_ltpms->tpm_initialized) {
> +            qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
> +            tpm_ltpms->tpm_initialized = false;
> +            qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
> +
> +            TPMLIB_Terminate();
> +        }

and release it here.

> +        /* fall through */
> +    case TPM_BACKEND_CMD_INIT:
> +        res = TPMLIB_MainInit();
> +        if (res == TPM_SUCCESS) {
> +            qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
> +            tpm_ltpms->tpm_initialized = true;
> +            qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
> +        } else {
> +            qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                          "TPM libtpms initialization failed");
> +            abort();
> +        }
> +        break;
> +    case TPM_BACKEND_CMD_PROCESS_CMD:
> +        tpm_ltpms_process_request(tpm_ltpms, thr_parms);
> +        break;
> +    case TPM_BACKEND_CMD_END:

Same here ...

> +        if (tpm_ltpms->tpm_initialized) {
> +            qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
> +            tpm_ltpms->tpm_initialized = false;
> +            qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
> +
> +            TPMLIB_Terminate();
> +            tpm_ltpms_free_nvram_all(tpm_ltpms);
> +        }
> +        break;
> +    }
> +}
> +
> +/*****************************************************************
> + * libtpms TPM library callbacks
> + ****************************************************************/
> +
> +/*
> + * Called by libtpms before any access to persistent storage is done
> + */
> +static TPM_RESULT tpm_ltpms_nvram_init(void)
> +{
> +    int rc;
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
> +
> +    tpm_ltpms->bdrv = bdrv_find(tpm_backend->nvram_id);
> +    if (!tpm_ltpms->bdrv) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM 'nvram' drive not found");
> +        abort();
> +    }
> +
> +    rc = tpm_nvram_bdrv_init(tpm_ltpms->bdrv);
> +    if (rc) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM drive init failed");
> +        abort();
> +    }
> +
> +    tpm_ltpms_get_nvram_offsets(tpm_ltpms);
> +
> +    rc = tpm_ltpms_load_tpm_state_from_nvram(tpm_ltpms);
> +    if (rc) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR, "TPM NVRAM load state failed");
> +        abort();
> +    }
> +
> +    return TPM_SUCCESS;
> +}
> +
> +/*
> + * Called by libtpms when the TPM wants to load state from persistent
> + * storage
> + */
> +static TPM_RESULT tpm_ltpms_nvram_loaddata(unsigned char **data,
> +                                           uint32_t *length,
> +                                           uint32_t tpm_number,
> +                                           const char *name)
> +{
> +    TPM_RESULT rc = TPM_SUCCESS;
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
> +    NVRAMEntry **entry = NULL;
> +
> +    DPRINTF("tpm_libtpms: Loading NVRAM state '%s' from storage\n", name);
> +
> +    if (tpm_ltpms->had_fatal_error) {
> +        return TPM_FAIL;
> +    }
> +
> +    *length = 0;
> +
> +    if (!strcmp(name, TPM_PERMANENT_ALL_NAME)) {
> +        entry = &tpm_ltpms->perm_state_entry;
> +    } else if (!strcmp(name, TPM_SAVESTATE_NAME)) {
> +        entry = &tpm_ltpms->save_state_entry;
> +    } else if (!strcmp(name, TPM_VOLATILESTATE_NAME)) {
> +        entry = &tpm_ltpms->vola_state_entry;
> +    }
> +
> +    /* In-memory entries are allocated for the life of the backend */
> +    assert(entry != NULL);
> +
> +    *length = (*entry)->cur_size;
> +    if (*length > 0) {
> +        rc = TPM_Malloc(data, *length);
> +        if (rc == TPM_SUCCESS) {
> +            memcpy(*data, (*entry)->buffer, *length);
> +        } else {
> +            qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                          "TPM memory allocation failed");
> +            abort();
> +        }
> +    }
> +
> +    if (*length == 0) {
> +        rc = TPM_RETRY;
> +    }
> +
> +    DPRINTF("tpm_libtpms: Read %"PRIu32" bytes from storage\n", *length);
> +
> +    return rc;
> +}
> +
> +/*
> + * Called by libtpms when the TPM wants to store state to persistent
> + * storage
> + */
> +static TPM_RESULT tpm_ltpms_nvram_storedata(const unsigned char *data,
> +                                            uint32_t length,
> +                                            uint32_t tpm_number,
> +                                            const char *name)
> +{
> +    TPM_RESULT rc = TPM_SUCCESS;
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
> +    NVRAMEntry *entry = NULL;
> +    uint32_t offset = 0, max_size = 0;
> +
> +    DPRINTF("tpm_libtpms: Storing NVRAM state '%s' to storage\n", name);
> +
> +    if (tpm_ltpms->had_fatal_error) {
> +        return TPM_FAIL;
> +    }
> +
> +    if (!strcmp(name, TPM_PERMANENT_ALL_NAME)) {
> +        entry = tpm_ltpms->perm_state_entry;
> +        offset = tpm_ltpms->perm_state_entry_offset;
> +        max_size = tpm_ltpms->perm_state_max_size;
> +    } else if (!strcmp(name, TPM_SAVESTATE_NAME)) {
> +        entry = tpm_ltpms->save_state_entry;
> +        offset = tpm_ltpms->save_state_entry_offset;
> +        max_size = tpm_ltpms->save_state_max_size;
> +    } else if (!strcmp(name, TPM_VOLATILESTATE_NAME)) {
> +        entry = tpm_ltpms->vola_state_entry;
> +        offset = tpm_ltpms->vola_state_entry_offset;
> +        max_size = tpm_ltpms->vola_state_max_size;
> +    }
> +
> +    /* In-memory entries are allocated for the life of the backend */
> +    assert(entry != NULL);
> +
> +    if (length > 0) {
> +        rc = TPM_Realloc(&entry->buffer, length);
> +        if (rc != TPM_SUCCESS) {
> +            qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                          "TPM memory allocation failed");
> +            abort();
> +        }
> +        memcpy(entry->buffer, data, length);
> +        entry->cur_size = length;
> +    } else {
> +        tpm_ltpms_free_nvram_buffer(entry);
> +    }
> +
> +    if (tpm_ltpms_write_to_nvram(tpm_ltpms, offset, entry, max_size)) {
> +        goto err_exit;
> +    }
> +
> +    DPRINTF("tpm_libtpms: Wrote %"PRIu32" bytes to storage\n", length);
> +
> +    return rc;
> +
> +err_exit:
> +    tpm_ltpms->had_fatal_error = true;
> +
> +    return TPM_FAIL;
> +}
> +
> +/*
> + * Called by libtpms when the TPM wants to delete state from persistent
> + * storage
> + */
> +static TPM_RESULT tpm_ltpms_nvram_deletename(uint32_t tpm_number,
> +                                             const char *name,
> +                                             TPM_BOOL mustExist)
> +{
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
> +    NVRAMEntry *entry = NULL;
> +    uint32_t offset = 0, max_size = 0;
> +
> +    DPRINTF("tpm_libtpms: Deleting NVRAM state '%s' from storage\n", name);
> +
> +    if (tpm_ltpms->had_fatal_error) {
> +        return TPM_FAIL;
> +    }
> +
> +    if (!strcmp(name, TPM_PERMANENT_ALL_NAME)) {
> +        entry = tpm_ltpms->perm_state_entry;
> +        offset = tpm_ltpms->perm_state_entry_offset;
> +        max_size = tpm_ltpms->perm_state_max_size;
> +    } else if (!strcmp(name, TPM_SAVESTATE_NAME)) {
> +        entry = tpm_ltpms->save_state_entry;
> +        offset = tpm_ltpms->save_state_entry_offset;
> +        max_size = tpm_ltpms->save_state_max_size;
> +    } else if (!strcmp(name, TPM_VOLATILESTATE_NAME)) {
> +        entry = tpm_ltpms->vola_state_entry;
> +        offset = tpm_ltpms->vola_state_entry_offset;
> +        max_size = tpm_ltpms->vola_state_max_size;
> +    }
> +
> +    if (entry) {
> +        tpm_ltpms_free_nvram_buffer(entry);
> +
> +        if (tpm_ltpms_write_to_nvram(tpm_ltpms, offset, entry, max_size)) {
> +            goto err_exit;
> +        }
> +    }
> +
> +    DPRINTF("tpm_libtpms: Deleted NVRAM state '%s' from storage\n", name);
> +
> +    return TPM_SUCCESS;
> +
> +err_exit:
> +    tpm_ltpms->had_fatal_error = true;
> +
> +    return TPM_FAIL;
> +}
> +
> +/*
> + * Called by libtpms to initialize the I/O subsystem of the TPM
> + */
> +static TPM_RESULT tpm_ltpms_io_init(void)
> +{
> +    return TPM_SUCCESS;
> +}
> +
> +/*
> + * Called by libtpms when the TPM needs to determine the locality under
> + * which a command is supposed to be executed
> + */
> +static TPM_RESULT tpm_ltpms_io_getlocality(TPM_MODIFIER_INDICATOR *
> +                                           localityModifier,
> +                                           uint32_t tpm_number)
> +{
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tpm_backend);
> +
> +    *localityModifier = (TPM_MODIFIER_INDICATOR)tpm_ltpms->locty;
> +
> +    return TPM_SUCCESS;
> +}
> +
> +/*
> + * Called by libtpms when the TPM needs to determine whether physical
> + * presence has been asserted
> + */
> +static TPM_RESULT tpm_ltpms_io_getphysicalpresence(TPM_BOOL *physicalPresence,
> +                                                   uint32_t tpm_number)
> +{
> +    *physicalPresence = FALSE;
> +
> +    return TPM_SUCCESS;
> +}
> +
> +struct libtpms_callbacks callbacks = {
> +    .sizeOfStruct               = sizeof(struct libtpms_callbacks),
> +    .tpm_nvram_init             = tpm_ltpms_nvram_init,
> +    .tpm_nvram_loaddata         = tpm_ltpms_nvram_loaddata,
> +    .tpm_nvram_storedata        = tpm_ltpms_nvram_storedata,
> +    .tpm_nvram_deletename       = tpm_ltpms_nvram_deletename,
> +    .tpm_io_init                = tpm_ltpms_io_init,
> +    .tpm_io_getlocality         = tpm_ltpms_io_getlocality,
> +    .tpm_io_getphysicalpresence = tpm_ltpms_io_getphysicalpresence,
> +};
> +
> +/*****************************************************************/
> +
> +/*
> + * Start the TPM (thread).  If it had been started before, then terminate
> + * and start it again.
> + */
> +static int tpm_ltpms_startup_tpm(TPMBackend *tb)
> +{
> +    struct TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
> +
> +    /* 'power-reset' a running TPM; if none is running start one */
> +    tpm_backend_thread_tpm_reset(&tpm_ltpms->tbt, tpm_ltpms_worker_thread,
> +                                 &tpm_ltpms->tpm_thread_params);
> +
> +    return 0;
> +}
> +
> +static void tpm_ltpms_terminate_tpm_thread(TPMBackend *tb)
> +{
> +    struct TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
> +
> +    tpm_backend_thread_end(&tpm_ltpms->tbt);
> +}
> +
> +static void tpm_ltpms_reset(TPMBackend *tb)
> +{
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
> +
> +    DPRINTF("tpm_libtpms: Resetting TPM libtpms backend\n");
> +
> +    tpm_ltpms_terminate_tpm_thread(tb);
> +
> +    tpm_ltpms->had_fatal_error = false;
> +}
> +
> +static int tpm_ltpms_init(TPMBackend *tb, TPMState *s,
> +                          TPMRecvDataCB *recv_data_cb)
> +{
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
> +
> +    if (TPMLIB_RegisterCallbacks(&callbacks) != TPM_SUCCESS) {
> +        qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                      "TPM libtpms callback registration failed");
> +        return -1;
> +    }
> +
> +    tpm_ltpms->tpm_thread_params.tpm_state = s;
> +    tpm_ltpms->tpm_thread_params.recv_data_callback = recv_data_cb;
> +    tpm_ltpms->tpm_thread_params.tb = tb;
> +
> +    qemu_mutex_init(&tpm_ltpms->tpm_initialized_mutex);
> +
> +    return 0;
> +}
> +
> +static bool tpm_ltpms_get_tpm_established_flag(TPMBackend *tb)
> +{
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
> +    TPM_BOOL tpmEstablished = false;
> +
> +    qemu_mutex_lock(&tpm_ltpms->tpm_initialized_mutex);
> +    if (tpm_ltpms->tpm_initialized) {
> +        TPM_IO_TpmEstablished_Get(&tpmEstablished);
> +    }
> +    qemu_mutex_unlock(&tpm_ltpms->tpm_initialized_mutex);
> +
> +    return tpmEstablished;
> +}
> +
> +static bool tpm_ltpms_get_startup_error(TPMBackend *tb)
> +{
> +    return false;
> +}
> +
> +static size_t tpm_ltpms_realloc_buffer(TPMSizedBuffer *sb)
> +{
> +    size_t wanted_size = tpmlib_get_prop(TPMPROP_TPM_BUFFER_MAX);
> +
> +    if (sb->size != wanted_size) {
> +        TPM_RESULT res = TPM_Realloc(&sb->buffer, wanted_size);
> +        if (res == TPM_SUCCESS) {
> +            sb->size = wanted_size;
> +        } else {
> +            qerror_report(ERROR_CLASS_GENERIC_ERROR,
> +                          "TPM memory allocation failed");
> +            abort();
> +        }
> +    }
> +    return sb->size;
> +}
> +
> +static void tpm_ltpms_deliver_request(TPMBackend *tb)
> +{
> +    TPMLTPMsState *tpm_ltpms = TPM_LIBTPMS(tb);
> +
> +    tpm_backend_thread_deliver_request(&tpm_ltpms->tbt);
> +}
> +
> +static void tpm_ltpms_cancel_cmd(TPMBackend *be)
> +{
> +}
> +
> +static const char *tpm_ltpms_create_desc(void)
> +{
> +    return "libtpms TPM backend driver";
> +}
> +
> +static TPMBackend *tpm_ltpms_create(QemuOpts *opts, const char *id)
> +{
> +    Object *obj = object_new(TYPE_TPM_LIBTPMS);
> +    TPMBackend *tb = TPM_BACKEND(obj);
> +    const char *value;
> +
> +    tb->id = g_strdup(id);
> +    tb->fe_model = -1;
> +    tb->ops = &tpm_ltpms_driver;
> +
> +    value = qemu_opt_get(opts, "nvram");
> +    if (!value) {
> +        qerror_report(QERR_MISSING_PARAMETER, "nvram");
> +        goto err_exit;
> +    }
> +    tb->nvram_id = g_strdup(value);
> +
> +    return tb;
> +
> +err_exit:
> +    g_free(tb->id);
> +
> +    return NULL;
> +}
> +
> +static void tpm_ltpms_destroy(TPMBackend *tb)
> +{
> +    tpm_ltpms_terminate_tpm_thread(tb);
> +
> +    g_free(tb->id);
> +    g_free(tb->nvram_id);
> +}
> +
> +static const QemuOptDesc tpm_ltpms_cmdline_opts[] = {
> +    TPM_STANDARD_CMDLINE_OPTS,
> +    {
> +        .name = "nvram",
> +        .type = QEMU_OPT_STRING,
> +        .help = "NVRAM drive id",
> +    },
> +    { /* end of list */ },
> +};
> +
> +static const TPMDriverOps tpm_ltpms_driver = {
> +    .type                     = TPM_TYPE_LIBTPMS,
> +    .opts                     = tpm_ltpms_cmdline_opts,
> +    .desc                     = tpm_ltpms_create_desc,
> +    .create                   = tpm_ltpms_create,
> +    .destroy                  = tpm_ltpms_destroy,
> +    .init                     = tpm_ltpms_init,
> +    .startup_tpm              = tpm_ltpms_startup_tpm,
> +    .realloc_buffer           = tpm_ltpms_realloc_buffer,
> +    .reset                    = tpm_ltpms_reset,
> +    .had_startup_error        = tpm_ltpms_get_startup_error,
> +    .deliver_request          = tpm_ltpms_deliver_request,
> +    .cancel_cmd               = tpm_ltpms_cancel_cmd,
> +    .get_tpm_established_flag = tpm_ltpms_get_tpm_established_flag,
> +};
> +
> +
> +static void tpm_ltpms_inst_init(Object *obj)
> +{
> +}
> +
> +static void tpm_ltpms_inst_finalize(Object *obj)
> +{
> +}
> +
> +static void tpm_ltpms_class_init(ObjectClass *klass, void *data)
> +{
> +    TPMBackendClass *tbc = TPM_BACKEND_CLASS(klass);
> +
> +    tbc->ops = &tpm_ltpms_driver;
> +}
> +
> +static const TypeInfo tpm_ltpms_info = {
> +    .name = TYPE_TPM_LIBTPMS,
> +    .parent = TYPE_TPM_BACKEND,
> +    .instance_size = sizeof(TPMLTPMsState),
> +    .class_init = tpm_ltpms_class_init,
> +    .instance_init = tpm_ltpms_inst_init,
> +    .instance_finalize = tpm_ltpms_inst_finalize,
> +};
> +
> +static void tpm_libtpms_register(void)
> +{
> +    type_register_static(&tpm_ltpms_info);
> +    tpm_register_driver(&tpm_ltpms_driver);
> +}
> +
> +type_init(tpm_libtpms_register)
> diff --git a/qemu-options.hx b/qemu-options.hx
> index 5dc8b75..356978e 100644
> --- a/qemu-options.hx
> +++ b/qemu-options.hx
> @@ -2251,7 +2251,9 @@ DEF("tpmdev", HAS_ARG, QEMU_OPTION_tpmdev, \
>       "-tpmdev passthrough,id=id[,path=path][,cancel-path=path]\n"
>       "                use path to provide path to a character device; default is /dev/tpm0\n"
>       "                use cancel-path to provide path to TPM's cancel sysfs entry; if\n"
> -    "                not provided it will be searched for in /sys/class/misc/tpm?/device\n",
> +    "                not provided it will be searched for in /sys/class/misc/tpm?/device\n"
> +    "-tpmdev libtpms,id=id,nvram=drive-id\n"
> +    "                use nvram to provide the NVRAM drive id\n",
>       QEMU_ARCH_ALL)
>   STEXI
>
> @@ -2261,7 +2263,8 @@ The general form of a TPM device option is:
>   @item -tpmdev @var{backend} ,id=@var{id} [,@var{options}]
>   @findex -tpmdev
>   Backend type must be:
> -@option{passthrough}.
> +@option{passthrough}, or
> +@option{libtpms}.
>
>   The specific backend type will determine the applicable options.
>   The @code{-tpmdev} option creates the TPM backend and requires a
> @@ -2311,6 +2314,30 @@ To create a passthrough TPM use the following two options:
>   Note that the @code{-tpmdev} id is @code{tpm0} and is referenced by
>   @code{tpmdev=tpm0} in the device option.
>
> +@item -tpmdev libtpms, id=@var{id}, nvram=@var{drive-id}
> +
> +Enable access to the libtpms-based emulated TPM.
> +
> +@option{nvram} specifies the drive id of the NVRAM drive.
> +
> +Some notes about using the libtpms-based emulated TPM:
> +
> +To create a libtpms-based TPM, use the following options:
> +@example
> +-drive file=<path to image file>,if=none,id=tpm-nvram \
> +-tpmdev libtpms,id=tpm0,nvram=tpm-nvram \
> +-device tpm-tis,tpmdev=tpm0
> +@end example
> +
> +The @code{drive} option provides the path to the image file where the
> +TPM's persistent NVRAM data will be stored. Using the @code{qemu-img} tool,
> +such an image can be created with a size of 500K.
> +
> +Note that the @code{-tpmdev} id is @code{tpm0} and is referenced by
> +@code{tpmdev=tpm0} in the @code{-device} option. Similarly, the @code{-drive}
> +id @code{tpm-nvram} is referenced by @code{nvram=tpm-nvram} in the
> +@code{-tpmdev} option.
> +
>   @end table
>
>   ETEXI
> diff --git a/tpm.c b/tpm.c
> index 2f4ef52..a08e3fd 100644
> --- a/tpm.c
> +++ b/tpm.c
> @@ -26,7 +26,7 @@ static QLIST_HEAD(, TPMBackend) tpm_backends =
>
>
>   #define TPM_MAX_MODELS      1
> -#define TPM_MAX_DRIVERS     1
> +#define TPM_MAX_DRIVERS     2
>
>   static TPMDriverOps const *be_drivers[TPM_MAX_DRIVERS] = {
>       NULL,

Stefan

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
@ 2013-11-25  3:36 Xu, Quan
  2013-11-25 13:53 ` Corey Bryant
  0 siblings, 1 reply; 11+ messages in thread
From: Xu, Quan @ 2013-11-25  3:36 UTC (permalink / raw)
  To: coreyb@linux.vnet.ibm.com; +Cc: qemu-devel@nongnu.org, Xu, Quan

[-- Attachment #1: Type: text/plain, Size: 267 bytes --]

Bryant,

     I found that there is some conflict in qemu-options.hx between your patch and qemu-1.7.0-rc1.tar.bz2<http://wiki.qemu-project.org/download/qemu-1.7.0-rc1.tar.bz2>.

What QEMU version does this patch base on? Thanks.





Quan Xu

Intel



[-- Attachment #2: Type: text/html, Size: 2931 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-11-25  3:36 Xu, Quan
@ 2013-11-25 13:53 ` Corey Bryant
  2013-11-26  3:04   ` Xu, Quan
  0 siblings, 1 reply; 11+ messages in thread
From: Corey Bryant @ 2013-11-25 13:53 UTC (permalink / raw)
  To: Xu, Quan; +Cc: qemu-devel@nongnu.org



On 11/24/2013 10:36 PM, Xu, Quan wrote:
> Bryant,
>
>       I found that there is some conflict in qemu-options.hx between your patch and qemu-1.7.0-rc1.tar.bz2  <http://wiki.qemu-project.org/download/qemu-1.7.0-rc1.tar.bz2>.
>
> What QEMU version does this patch base on? Thanks.
>
>
>
>
>
> Quan Xu
>
> Intel
>
>
>

Thanks Quan.  I believe I built these on top of commit 
c2d30667760e3d7b81290d801e567d4f758825ca.  I don't think this series is 
going to make it upstream though so I likely won't be submitting a v2.

-- 
Regards,
Corey Bryant

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-11-25 13:53 ` Corey Bryant
@ 2013-11-26  3:04   ` Xu, Quan
  2013-11-26 14:40     ` Corey Bryant
  0 siblings, 1 reply; 11+ messages in thread
From: Xu, Quan @ 2013-11-26  3:04 UTC (permalink / raw)
  To: Corey Bryant; +Cc: qemu-devel@nongnu.org

     Thanks Bryant, this problem has been solved by following "http://www.mail-archive.com/qemu-devel@nongnu.org/msg200808.html".
     But there is another problem when running configure with "./configure --target-list=x86_64-softmmu --enable-tpm": the value of "libtpms" is still "no". When I modified "tpm_libtpms" to "yes" in the configure file directly and ran make, it reported the error
"hw/tpm/tpm_libtpms.c:21:33: fatal error: libtpms/tpm_library.h: No such file or directory".  Now I am installing libtpms from https://github.com/coreycb/libtpms for the libtpms library. Could you share the specific steps to configure QEMU based on your patch, if it comes
easily to you?
     BTW, one target of my team is enabling stubdom vtpm for HVM virtual machine on Xen virtualization, your patches and seabios are big breakthroughs. My team is very interested to collaborate with you / Qemu community on similar areas.  

I'd be really pleased if you can help me on these issues.

Quan Xu 
Intel 


> -----Original Message-----
> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
> Sent: Monday, November 25, 2013 9:53 PM
> To: Xu, Quan
> Cc: qemu-devel@nongnu.org
> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM
> backend
> 
> 
> 
> On 11/24/2013 10:36 PM, Xu, Quan wrote:
> > Bryant,
> >
> >       I found that there is some conflict in qemu-options.hx between your
> patch andqemu-1.7.0-rc1.tar.bz2
> <http://wiki.qemu-project.org/download/qemu-1.7.0-rc1.tar.bz2>.
> >
> > What QEMU version does this patch base on? Thanks.
> >
> >
> >
> >
> >
> > Quan Xu
> >
> > Intel
> >
> >
> >
> 
> Thanks Quan.  I believe I built these on top of commit
> c2d30667760e3d7b81290d801e567d4f758825ca.  I don't think this series is going
> to make it upstream though so I likely won't be submitting a v2.
> 
> --
> Regards,
> Corey Bryant

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-11-26  3:04   ` Xu, Quan
@ 2013-11-26 14:40     ` Corey Bryant
  2013-12-02  4:00       ` Xu, Quan
  0 siblings, 1 reply; 11+ messages in thread
From: Corey Bryant @ 2013-11-26 14:40 UTC (permalink / raw)
  To: Xu, Quan; +Cc: qemu-devel@nongnu.org


On 11/25/2013 10:04 PM, Xu, Quan wrote:
>       Thanks Bryant, this problem has been solved by following "http://www.mail-archive.com/qemu-devel@nongnu.org/msg200808.html".
>       But there is another problem when run configure with "./configure --target-list=x86_64-softmmu --enable-tpm". The value of "libtpms" is still "no". when I modified "tpm_libtpms" to "yes" in configure file directly and make, then reported with error
> "hw/tpm/tpm_libtpms.c:21:33: fatal error: libtpms/tpm_library.h: No such file or directory".  Now I am installing libtpms with https://github.com/coreycb/libtpms for libtpms lib. Could you share specific step to configure QEMU based on your patch, if it comes
> easily to you?

Here's what I've been using to build libtpms:

$ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions 
-fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic'
$ export CFLAGS
$ ./configure --build=x86_64-redhat-linux-gnu --prefix=/usr 
--libdir=/usr/lib64
$ make
$ sudo make install

And then the configure you're using above should work for QEMU.

>       BTW, one target of my team is enabling stubdom vtpm for HVM virtual machine on Xen virtualization, your patches and seabios are big breakthroughs. My team is very interested to collaborate with you / Qemu community on similar areas.

That's great to hear!

Unfortunately, the current approach of linking QEMU against libtpms 
doesn't look like it's going to make it upstream.  So it looks like we 
need to take a different approach.

Btw, I thought Xen already had TPM support.  Is that not supported in 
stubdom's?

-- 
Regards,
Corey Bryant

>
> I'd be really pleased if you can help me on these issues.
>
> Quan Xu
> Intel
>
>
>> -----Original Message-----
>> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
>> Sent: Monday, November 25, 2013 9:53 PM
>> To: Xu, Quan
>> Cc: qemu-devel@nongnu.org
>> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM
>> backend
>>
>>
>>
>> On 11/24/2013 10:36 PM, Xu, Quan wrote:
>>> Bryant,
>>>
>>>        I found that there is some conflict in qemu-options.hx between your
>> patch andqemu-1.7.0-rc1.tar.bz2
>> <http://wiki.qemu-project.org/download/qemu-1.7.0-rc1.tar.bz2>.
>>>
>>> What QEMU version does this patch base on? Thanks.
>>>
>>>
>>>
>>>
>>>
>>> Quan Xu
>>>
>>> Intel
>>>
>>>
>>>
>>
>> Thanks Quan.  I believe I built these on top of commit
>> c2d30667760e3d7b81290d801e567d4f758825ca.  I don't think this series is going
>> to make it upstream though so I likely won't be submitting a v2.
>>
>> --
>> Regards,
>> Corey Bryant
>
>

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-11-26 14:40     ` Corey Bryant
@ 2013-12-02  4:00       ` Xu, Quan
  2013-12-02 14:16         ` Corey Bryant
  0 siblings, 1 reply; 11+ messages in thread
From: Xu, Quan @ 2013-12-02  4:00 UTC (permalink / raw)
  To: Corey Bryant; +Cc: qemu-devel@nongnu.org, Xu, Quan



> -----Original Message-----
> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
> Sent: Tuesday, November 26, 2013 10:40 PM
> To: Xu, Quan
> Cc: qemu-devel@nongnu.org
> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM
> backend
> 
> 
> On 11/25/2013 10:04 PM, Xu, Quan wrote:
> >       Thanks Bryant, this problem has been solved by following
> "http://www.mail-archive.com/qemu-devel@nongnu.org/msg200808.html".
> >       But there is another problem when run configure with
> > "./configure --target-list=x86_64-softmmu --enable-tpm". The value of
> > "libtpms" is still "no". when I modified "tpm_libtpms" to "yes" in
> > configure file directly and make, then reported with error
> > "hw/tpm/tpm_libtpms.c:21:33: fatal error: libtpms/tpm_library.h: No
> > such file or directory".  Now I am installing libtpms with
> https://github.com/coreycb/libtpms for libtpms lib. Could you share specific step
> to configure QEMU based on your patch, if it comes easily to you?
> 
> Here's what I've been using to build libtpms:
> 
> $ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions
> -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic'
> $ export CFLAGS
> $ ./configure --build=x86_64-redhat-linux-gnu --prefix=/usr
> --libdir=/usr/lib64
> $ make
> $ sudo make install
> 
> And then the configure you're using above should work for QEMU.



    Sorry for my delay in answering you. I had a cold and took sick leave last Friday.

    Now I have setup QEMU with your patch. Start VM with below command:
== 
   qemu-system-x86_64 -m 1024 -hda rhel.raw -nographic -vnc :1 -drive file=nvram.qcow2,if=none,id=nvram0-0-0,format=qcow2 -device tpm-tis,tpmdev=tpm-tpm0,id=tpm0 -tpmdev libtpms,id=tpm-tpm0,nvram=nvram0-0-0 -net nic -net tap,ifname=tap0,script=no
== 

rhel.raw is a Red Hat 6.4 image. Also, I have rebuilt the kernel with the TPM 1.2 driver in the VM. But I still can't find " /sys/class/misc/tpm0/ ". 

    Does it need SeaBios bios.bin to make it work?  If need bios.bin, could you send me a bios.bin and tell me how to enable bios.bin with your patch?

BTW, I found a SeaBios patch:( Add TPM support to SeaBIOS) http://www.seabios.org/pipermail/seabios/2011-April/001609.html. 



> 
> >       BTW, one target of my team is enabling stubdom vtpm for HVM virtual
> machine on Xen virtualization, your patches and seabios are big breakthroughs.
> My team is very interested to collaborate with you / Qemu community on similar
> areas.
> 
> That's great to hear!
> 
> Unfortunately, the current approach of linking QEMU against libtpms doesn't look
> like it's going to make it upstream.  So it looks like we need to take a different
> approach.



My team is very interested to collaborate to make it upstream. Let's do it together.


> 
> Btw, I thought Xen already had TPM support.  Is that not supported in
> stubdom's?



In Xen 4.3, Xen supports vTPM in a stubdom for para-virtualized virtual machines only.
My team is focusing on enabling stubdom vTPM for HVM virtual machines.



> 
> --
> Regards,
> Corey Bryant
> 
> >
> > I'd be really pleased if you can help me on these issues.
> >
> > Quan Xu
> > Intel
> >
> >
> >> -----Original Message-----
> >> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
> >> Sent: Monday, November 25, 2013 9:53 PM
> >> To: Xu, Quan
> >> Cc: qemu-devel@nongnu.org
> >> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software
> >> TPM backend
> >>
> >>
> >>
> >> On 11/24/2013 10:36 PM, Xu, Quan wrote:
> >>> Bryant,
> >>>
> >>>        I found that there is some conflict in qemu-options.hx
> >>> between your
> >> patch andqemu-1.7.0-rc1.tar.bz2
> >> <http://wiki.qemu-project.org/download/qemu-1.7.0-rc1.tar.bz2>.
> >>>
> >>> What QEMU version does this patch base on? Thanks.
> >>>
> >>>
> >>>
> >>>
> >>>
> >>> Quan Xu
> >>>
> >>> Intel
> >>>
> >>>
> >>>
> >>
> >> Thanks Quan.  I believe I built these on top of commit
> >> c2d30667760e3d7b81290d801e567d4f758825ca.  I don't think this series
> >> is going to make it upstream though so I likely won't be submitting a v2.
> >>
> >> --
> >> Regards,
> >> Corey Bryant
> >
> >

Quan Xu
Intel

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-12-02  4:00       ` Xu, Quan
@ 2013-12-02 14:16         ` Corey Bryant
  2013-12-02 15:21           ` Stefan Berger
                             ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Corey Bryant @ 2013-12-02 14:16 UTC (permalink / raw)
  To: Xu, Quan, Stefan Berger; +Cc: qemu-devel@nongnu.org

[-- Attachment #1: Type: text/plain, Size: 5553 bytes --]



On 12/01/2013 11:00 PM, Xu, Quan wrote:
>
>
>> -----Original Message-----
>> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
>> Sent: Tuesday, November 26, 2013 10:40 PM
>> To: Xu, Quan
>> Cc: qemu-devel@nongnu.org
>> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM
>> backend
>>
>>
>> On 11/25/2013 10:04 PM, Xu, Quan wrote:
>>>        Thanks Bryant, this problem has been solved by following
>> "http://www.mail-archive.com/qemu-devel@nongnu.org/msg200808.html".
>>>        But there is another problem when run configure with
>>> "./configure --target-list=x86_64-softmmu --enable-tpm". The value of
>>> "libtpms" is still "no". when I modified "tpm_libtpms" to "yes" in
>>> configure file directly and make, then reported with error
>>> "hw/tpm/tpm_libtpms.c:21:33: fatal error: libtpms/tpm_library.h: No
>>> such file or directory".  Now I am installing libtpms with
>> https://github.com/coreycb/libtpms for libtpms lib. Could you share specific step
>> to configure QEMU based on your patch, if it comes easily to you?
>>
>> Here's what I've been using to build libtpms:
>>
>> $ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions
>> -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic'
>> $ export CFLAGS
>> $ ./configure --build=x86_64-redhat-linux-gnu --prefix=/usr
>> --libdir=/usr/lib64
>> $ make
>> $ sudo make install
>>
>> And then the configure you're using above should work for QEMU.
>
>
>
>      Sorry for my delay to answer you. I had a cold and took a sick leave at last Friday.

Not a problem.  I hope you're feeling better.

>
>      Now I have setup QEMU with your patch. Start VM with below command:
> ==
>     qemu-system-x86_64 -m 1024 -hda rhel.raw -nographic -vnc :1 -drive file=nvram.qcow2,if=none,id=nvram0-0-0,format=qcow2 -device tpm-tis,tpmdev=tpm-tpm0,id=tpm0 -tpmdev libtpms,id=tpm-tpm0,nvram=nvram0-0-0 -net nic -net tap,ifname=tap0,script=no
> ==
>
> rhel.raw is Red Hat 6.4 image. Also I have rebuild kernel with TPM 1.2 driver in VM. But I still can't find " /sys/class/misc/tpm0/ ".
>
>      Does it need SeaBios bios.bin to make it work?  If need bios.bin, could you send me a bios.bin and tell me how to enable bios.bin with your patch?

Yes it needs bios.bin.  I've attached a bios.bin that has vTPM seabios 
updates.  You should be able to copy everything from 
/usr/local/share/qemu to a new directory, and just replace the bios.bin 
in the new directory with the one I've attached.  Then point qemu at the 
new directory.

Also, make sure you enable the boot menu.  Then when you boot your guest 
you can press F11 to get a menu of TPM options to enable, disable, 
activate, deactivate, clear, etc the vTPM.

Here's some sample libvirt domain XML updates:

<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
...
<os>
   <bootmenu enable='yes'/>
</os>
...
   <qemu:commandline>
     <qemu:arg value='-drive'/>
     <qemu:arg 
value='file=/home/corey/images/nvram.raw,if=none,id=drive-nvram0-0-0,format=raw'/>
     <qemu:arg value='-tpmdev'/>
     <qemu:arg value='libtpms,id=tpm-tpm0,nvram=drive-nvram0-0-0'/>
     <qemu:arg value='-device'/>
     <qemu:arg value='tpm-tis,tpmdev=tpm-tpm0,id=tpm0'/>
     <qemu:arg value='-L'/>
     <qemu:arg value='/usr/local/share/qemu/corey_seabios/'/>
   </qemu:commandline>
...

>
> BTW, I found a SeaBios patch:( Add TPM support to SeaBIOS) http://www.seabios.org/pipermail/seabios/2011-April/001609.html.
>
>
>

Stefan, do you know if this is the same code that was used to build our 
bios.bin?

-- 
Regards,
Corey Bryant

>>
>>>        BTW, one target of my team is enabling stubdom vtpm for HVM virtual
>> machine on Xen virtualization, your patches and seabios are big breakthroughs.
>> My team is very interested to collaborate with you / Qemu community on similar
>> areas.
>>
>> That's great to hear!
>>
>> Unfortunately, the current approach of linking QEMU against libtpms doesn't look
>> like it's going to make it upstream.  So it looks like we need to take a different
>> approach.
>
>
>
> My team is very interested to collaborate to make it upstream. Let's do it together.
>
>
>>
>> Btw, I thought Xen already had TPM support.  Is that not supported in
>> stubdom's?
>
>
>
> In Xen 4.3, Xen supports vtpm in stubdom for para-virtualization virtual machine only.
> My team is focusing on enabling stubdom vtpm for HVM virtual machine.
>
>
>
>>
>> --
>> Regards,
>> Corey Bryant
>>
>>>
>>> I'd be really pleased if you can help me on these issues.
>>>
>>> Quan Xu
>>> Intel
>>>
>>>
>>>> -----Original Message-----
>>>> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
>>>> Sent: Monday, November 25, 2013 9:53 PM
>>>> To: Xu, Quan
>>>> Cc: qemu-devel@nongnu.org
>>>> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software
>>>> TPM backend
>>>>
>>>>
>>>>
>>>> On 11/24/2013 10:36 PM, Xu, Quan wrote:
>>>>> Bryant,
>>>>>
>>>>>         I found that there is some conflict in qemu-options.hx
>>>>> between your
>>>> patch andqemu-1.7.0-rc1.tar.bz2
>>>> <http://wiki.qemu-project.org/download/qemu-1.7.0-rc1.tar.bz2>.
>>>>>
>>>>> What QEMU version does this patch base on? Thanks.
>>>>>
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> Quan Xu
>>>>>
>>>>> Intel
>>>>>
>>>>>
>>>>>
>>>>
>>>> Thanks Quan.  I believe I built these on top of commit
>>>> c2d30667760e3d7b81290d801e567d4f758825ca.  I don't think this series
>>>> is going to make it upstream though so I likely won't be submitting a v2.
>>>>
>>>> --
>>>> Regards,
>>>> Corey Bryant
>>>
>>>
>
> Quan Xu
> Intel
>
>
>
>


[-- Attachment #2: bios.bin --]
[-- Type: application/octet-stream, Size: 262144 bytes --]

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-12-02 14:16         ` Corey Bryant
@ 2013-12-02 15:21           ` Stefan Berger
  2013-12-16 12:45           ` Xu, Quan
  2014-03-21 11:50           ` Jordi Cucurull Juan
  2 siblings, 0 replies; 11+ messages in thread
From: Stefan Berger @ 2013-12-02 15:21 UTC (permalink / raw)
  To: Corey Bryant, Xu, Quan; +Cc: qemu-devel@nongnu.org

On 12/02/2013 09:16 AM, Corey Bryant wrote:
>
> Stefan, do you know if this is the same code that was used to build 
> our bios.bin?
>
Probably similar, but forward-ported to a more recent version.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-12-02 14:16         ` Corey Bryant
  2013-12-02 15:21           ` Stefan Berger
@ 2013-12-16 12:45           ` Xu, Quan
  2014-03-21 11:50           ` Jordi Cucurull Juan
  2 siblings, 0 replies; 11+ messages in thread
From: Xu, Quan @ 2013-12-16 12:45 UTC (permalink / raw)
  To: Corey Bryant, Stefan Berger; +Cc: qemu-devel@nongnu.org

Hi Bryant / Berger,
	Now the vTPM can be detected in a KVM-based virtual machine. /sys/class/misc/tpm0 is created. The /sys/class/misc/tpm0/device/{active, owned, enabled} values are correct.
But it fails to take ownership.
    1) Can it take ownership in your system?
    2) One software engineer will follow libtpms and make it work for KVM, and another software engineer will integrate qemu/seabios with Xen vtpm backend.
      Also my team is very interested to collaborate with you to upstream. BTW, I will follow all of the topics.
    3) We can schedule a meeting to talk in detail. I am at Intel Asian-Pacific R&D center, Shanghai China. Time zone 8+.



Thanks 
Quan




> -----Original Message-----
> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
> Sent: Monday, December 02, 2013 10:16 PM
> To: Xu, Quan; Stefan Berger
> Cc: qemu-devel@nongnu.org
> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM
> backend
> 
> 
> 
> On 12/01/2013 11:00 PM, Xu, Quan wrote:
> >
> >
> >> -----Original Message-----
> >> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
> >> Sent: Tuesday, November 26, 2013 10:40 PM
> >> To: Xu, Quan
> >> Cc: qemu-devel@nongnu.org
> >> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software
> >> TPM backend
> >>
> >>
> >> On 11/25/2013 10:04 PM, Xu, Quan wrote:
> >>>        Thanks Bryant, this problem has been solved by following
> >> "http://www.mail-archive.com/qemu-devel@nongnu.org/msg200808.html".
> >>>        But there is another problem when run configure with
> >>> "./configure --target-list=x86_64-softmmu --enable-tpm". The value
> >>> of "libtpms" is still "no". when I modified "tpm_libtpms" to "yes"
> >>> in configure file directly and make, then reported with error
> >>> "hw/tpm/tpm_libtpms.c:21:33: fatal error: libtpms/tpm_library.h: No
> >>> such file or directory".  Now I am installing libtpms with
> >> https://github.com/coreycb/libtpms for libtpms lib. Could you share
> >> specific step to configure QEMU based on your patch, if it comes easily to
> you?
> >>
> >> Here's what I've been using to build libtpms:
> >>
> >> $ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions
> >> -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic'
> >> $ export CFLAGS
> >> $ ./configure --build=x86_64-redhat-linux-gnu --prefix=/usr
> >> --libdir=/usr/lib64
> >> $ make
> >> $ sudo make install
> >>
> >> And then the configure you're using above should work for QEMU.
> >
> >
> >
> >      Sorry for my delay to answer you. I had a cold and took a sick leave at last
> Friday.
> 
> Not a problem.  I hope you're feeling better.
> 
> >
> >      Now I have setup QEMU with your patch. Start VM with below
> command:
> > ==
> >     qemu-system-x86_64 -m 1024 -hda rhel.raw -nographic -vnc :1 -drive
> > file=nvram.qcow2,if=none,id=nvram0-0-0,format=qcow2 -device
> > tpm-tis,tpmdev=tpm-tpm0,id=tpm0 -tpmdev
> > libtpms,id=tpm-tpm0,nvram=nvram0-0-0 -net nic -net
> > tap,ifname=tap0,script=no ==
> >
> > rhel.raw is Red Hat 6.4 image. Also I have rebuild kernel with TPM 1.2 driver in
> VM. But I still can't find " /sys/class/misc/tpm0/ ".
> >
> >      Does it need SeaBios bios.bin to make it work?  If need bios.bin, could
> you send me a bios.bin and tell me how to enable bios.bin with your patch?
> 
> Yes it needs bios.bin.  I've attached a bios.bin that has vTPM seabios updates.
> You should be able to copy everything from /usr/local/share/qemu to a new
> directory, and just replace the bios.bin in the new directory with the one I've
> attached.  Then point qemu at the new directory.
> 
> Also, make sure you enable the boot menu.  Then when you boot your guest
> you can press F11 to get a menu of TPM options to enable, disable, activate,
> deactivate, clear, etc the vTPM.
> 
> Here's some sample libvirt domain XML updates:
> 
> <domain type='kvm'
> xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
> ...
> <os>
>    <bootmenu enable='yes'/>
> </os>
> ...
>    <qemu:commandline>
>      <qemu:arg value='-drive'/>
>      <qemu:arg
> value='file=/home/corey/images/nvram.raw,if=none,id=drive-nvram0-0-0,forma
> t=raw'/>
>      <qemu:arg value='-tpmdev'/>
>      <qemu:arg value='libtpms,id=tpm-tpm0,nvram=drive-nvram0-0-0'/>
>      <qemu:arg value='-device'/>
>      <qemu:arg value='tpm-tis,tpmdev=tpm-tpm0,id=tpm0'/>
>      <qemu:arg value='-L'/>
>      <qemu:arg value='/usr/local/share/qemu/corey_seabios/'/>
>    </qemu:commandline>
> ...
> 
> >
> > BTW, I found a SeaBios patch:( Add TPM support to SeaBIOS)
> http://www.seabios.org/pipermail/seabios/2011-April/001609.html.
> >
> >
> >
> 
> Stefan, do you know if this is the same code that was used to build our bios.bin?
> 
> --
> Regards,
> Corey Bryant
> 
> >>
> >>>        BTW, one target of my team is enabling stubdom vtpm for HVM
> >>> virtual
> >> machine on Xen virtualization, your patches and seabios are big
> breakthroughs.
> >> My team is very interested to collaborate with you / Qemu community
> >> on similar areas.
> >>
> >> That's great to hear!
> >>
> >> Unfortunately, the current approach of linking QEMU against libtpms
> >> doesn't look like it's going to make it upstream.  So it looks like
> >> we need to take a different approach.
> >
> >
> >
> > My team is very interested to collaborate to make it upstream. Let's do it
> together.
> >
> >
> >>
> >> Btw, I thought Xen already had TPM support.  Is that not supported in
> >> stubdom's?
> >
> >
> >
> > In Xen 4.3, Xen supports vtpm in stubdom for para-virtualization virtual
> machine only.
> > My team is focusing on enabling stubdom vtpm for HVM virtual machine.
> >
> >
> >
> >>
> >> --
> >> Regards,
> >> Corey Bryant
> >>
> >>>
> >>> I'd be really pleased if you can help me on these issues.
> >>>
> >>> Quan Xu
> >>> Intel
> >>>
> >>>
> >>>> -----Original Message-----
> >>>> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
> >>>> Sent: Monday, November 25, 2013 9:53 PM
> >>>> To: Xu, Quan
> >>>> Cc: qemu-devel@nongnu.org
> >>>> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software
> >>>> TPM backend
> >>>>
> >>>>
> >>>>
> >>>> On 11/24/2013 10:36 PM, Xu, Quan wrote:
> >>>>> Bryant,
> >>>>>
> >>>>>         I found that there is some conflict in qemu-options.hx
> >>>>> between your
> >>>> patch andqemu-1.7.0-rc1.tar.bz2
> >>>> <http://wiki.qemu-project.org/download/qemu-1.7.0-rc1.tar.bz2>.
> >>>>>
> >>>>> What QEMU version does this patch base on? Thanks.
> >>>>>
> >>>>>
> >>>>>
> >>>>>
> >>>>>
> >>>>> Quan Xu
> >>>>>
> >>>>> Intel
> >>>>>
> >>>>>
> >>>>>
> >>>>
> >>>> Thanks Quan.  I believe I built these on top of commit
> >>>> c2d30667760e3d7b81290d801e567d4f758825ca.  I don't think this
> >>>> series is going to make it upstream though so I likely won't be submitting a
> v2.
> >>>>
> >>>> --
> >>>> Regards,
> >>>> Corey Bryant
> >>>
> >>>
> >
> > Quan Xu
> > Intel
> >
> >
> >
> >

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend
  2013-12-02 14:16         ` Corey Bryant
  2013-12-02 15:21           ` Stefan Berger
  2013-12-16 12:45           ` Xu, Quan
@ 2014-03-21 11:50           ` Jordi Cucurull Juan
  2 siblings, 0 replies; 11+ messages in thread
From: Jordi Cucurull Juan @ 2014-03-21 11:50 UTC (permalink / raw)
  To: Corey Bryant; +Cc: qemu-devel@nongnu.org, Xu, Quan, Stefan Berger

Dear all,

I have been testing the patches for the full virtual TPM implementation.

Following the comments of the previous posts I have had no issues to
make it working (using a QCOW2 non encrypted image). Nevertheless, if I
use a QCOW2 encrypted image to store the NVRAM information, the TPM
cannot be initiated normally after the first reboot:

qemu-system-x86_64: TPM NVRAM blob size too big
qemu-system-x86_64: TPM NVRAM load state failed

Are you aware of this? Have you experienced this issue?

Regards,
Jordi.


On 12/02/2013 03:16 PM, Corey Bryant wrote:
>
>
> On 12/01/2013 11:00 PM, Xu, Quan wrote:
>>
>>
>>> -----Original Message-----
>>> From: Corey Bryant [mailto:coreyb@linux.vnet.ibm.com]
>>> Sent: Tuesday, November 26, 2013 10:40 PM
>>> To: Xu, Quan
>>> Cc: qemu-devel@nongnu.org
>>> Subject: Re: [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM
>>> backend
>>>
>>>
>>> On 11/25/2013 10:04 PM, Xu, Quan wrote:
>>>>        Thanks Bryant, this problem has been solved by following
>>> "http://www.mail-archive.com/qemu-devel@nongnu.org/msg200808.html".
>>>>        But there is another problem when run configure with
>>>> "./configure --target-list=x86_64-softmmu --enable-tpm". The value of
>>>> "libtpms" is still "no". when I modified "tpm_libtpms" to "yes" in
>>>> configure file directly and make, then reported with error
>>>> "hw/tpm/tpm_libtpms.c:21:33: fatal error: libtpms/tpm_library.h: No
>>>> such file or directory".  Now I am installing libtpms with
>>> https://github.com/coreycb/libtpms for libtpms lib. Could you share
>>> specific step
>>> to configure QEMU based on your patch, if it comes easily to you?
>>>
>>> Here's what I've been using to build libtpms:
>>>
>>> $ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions
>>> -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic'
>>> $ export CFLAGS
>>> $ ./configure --build=x86_64-redhat-linux-gnu --prefix=/usr
>>> --libdir=/usr/lib64
>>> $ make
>>> $ sudo make install
>>>
>>> And then the configure you're using above should work for QEMU.
>>
>>
>>
>>      Sorry for my delay to answer you. I had a cold and took a sick
>> leave at last Friday.
>
> Not a problem.  I hope you're feeling better.
>
>>
>>      Now I have setup QEMU with your patch. Start VM with below command:
>> ==
>>     qemu-system-x86_64 -m 1024 -hda rhel.raw -nographic -vnc :1
>> -drive file=nvram.qcow2,if=none,id=nvram0-0-0,format=qcow2 -device
>> tpm-tis,tpmdev=tpm-tpm0,id=tpm0 -tpmdev
>> libtpms,id=tpm-tpm0,nvram=nvram0-0-0 -net nic -net
>> tap,ifname=tap0,script=no
>> ==
>>
>> rhel.raw is Red Hat 6.4 image. Also I have rebuild kernel with TPM
>> 1.2 driver in VM. But I still can't find " /sys/class/misc/tpm0/ ".
>>
>>      Does it need SeaBios bios.bin to make it work?  If need
>> bios.bin, could you send me a bios.bin and tell me how to enable
>> bios.bin with your patch?
>
> Yes it needs bios.bin.  I've attached a bios.bin that has vTPM seabios
> updates.  You should be able to copy everything from
> /usr/local/share/qemu to a new directory, and just replace the
> bios.bin in the new directory with the one I've attached.  Then point
> qemu at the new directory.
>
> Also, make sure you enable the boot menu.  Then when you boot your
> guest you can press F11 to get a menu of TPM options to enable,
> disable, activate, deactivate, clear, etc the vTPM.
>
> Here's some sample libvirt domain XML updates:
>
> <domain type='kvm'
> xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
> ...
> <os>
>   <bootmenu enable='yes'/>
> </os>
> ...
>   <qemu:commandline>
>     <qemu:arg value='-drive'/>
>     <qemu:arg
> value='file=/home/corey/images/nvram.raw,if=none,id=drive-nvram0-0-0,format=raw'/>
>     <qemu:arg value='-tpmdev'/>
>     <qemu:arg value='libtpms,id=tpm-tpm0,nvram=drive-nvram0-0-0'/>
>     <qemu:arg value='-device'/>
>     <qemu:arg value='tpm-tis,tpmdev=tpm-tpm0,id=tpm0'/>
>     <qemu:arg value='-L'/>
>     <qemu:arg value='/usr/local/share/qemu/corey_seabios/'/>
>   </qemu:commandline>
> ...
>
>>
>> BTW, I found a SeaBios patch:( Add TPM support to SeaBIOS)
>> http://www.seabios.org/pipermail/seabios/2011-April/001609.html.
>>
>>
>>
>
> Stefan, do you know if this is the same code that was used to build
> our bios.bin?
>

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2014-03-21 11:50 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-11-06 14:39 [Qemu-devel] [PATCH 4/4] tpm: Provide libtpms software TPM backend Corey Bryant
2013-11-19 19:37 ` Stefan Berger
  -- strict thread matches above, loose matches on Subject: below --
2013-11-25  3:36 Xu, Quan
2013-11-25 13:53 ` Corey Bryant
2013-11-26  3:04   ` Xu, Quan
2013-11-26 14:40     ` Corey Bryant
2013-12-02  4:00       ` Xu, Quan
2013-12-02 14:16         ` Corey Bryant
2013-12-02 15:21           ` Stefan Berger
2013-12-16 12:45           ` Xu, Quan
2014-03-21 11:50           ` Jordi Cucurull Juan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).