From: Jason Wang <jasowang@redhat.com>
To: Laurent Vivier <lvivier@redhat.com>, qemu-devel@nongnu.org
Cc: Stefan Hajnoczi <stefanha@redhat.com>, Cindy Lu <lulu@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>
Subject: Re: [PATCH v2 2/2] vhost-vdpa: add trace-events
Date: Tue, 22 Sep 2020 10:09:43 +0800
Message-ID: <eeb7aee3-b15e-919c-d378-5cc3b2f9ff08@redhat.com>
In-Reply-To: <20200921130406.941363-3-lvivier@redhat.com>
On 2020/9/21 9:04 PM, Laurent Vivier wrote:
> Add trace functions in vhost-vdpa.c.
>
> All traces from this file can be enabled with '-trace vhost_vdpa*'.
>
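For anyone who wants to try these, enabling the traces at run time should
look something like the following (an illustrative command line, not part
of the patch; adjust the vhost-vdpa device path and machine options to
your setup):

    qemu-system-x86_64 \
        -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
        -device virtio-net-pci,netdev=vdpa0 \
        -trace 'vhost_vdpa*'
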
> Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>
> ---
> hw/virtio/trace-events | 29 ++++++++++++++
> hw/virtio/vhost-vdpa.c | 86 +++++++++++++++++++++++++++++++++++++++---
> 2 files changed, 110 insertions(+), 5 deletions(-)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 845200bf109d..2b453f77a4e3 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -22,6 +22,35 @@ vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
> vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
> vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
>
> +# vhost-vdpa.c
> +vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
> +vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
> +vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
> +vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
> +vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p"
> +vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x"
> +vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32
> +vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding) "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64" userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
> +vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
> +vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32
> +vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
> +vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d"
> +vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
> +vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
> +vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32
> +vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32
> +vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
> +vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p"
> +vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
> +vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
> +vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
> +vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u"
> +vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
> +vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
It's better to also add traces for set/get_vring_addr() and
dma_map()/dma_unmap().
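Something along the lines of the sketch below would match the style of the
entries above (a rough illustration based on the current arguments of
vhost_vdpa_dma_map()/vhost_vdpa_dma_unmap(), not a tested change):

    # sketch only, not from this patch
    vhost_vdpa_dma_map(void *vdpa, uint64_t iova, uint64_t size, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" size 0x%"PRIx64" vaddr: %p readonly: %d"
    vhost_vdpa_dma_unmap(void *vdpa, uint64_t iova, uint64_t size) "vdpa: %p iova 0x%"PRIx64" size 0x%"PRIx64

plus the matching trace_vhost_vdpa_dma_map()/trace_vhost_vdpa_dma_unmap()
calls in the corresponding functions in vhost-vdpa.c.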
Thanks
> +vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
> +vhost_vdpa_set_owner(void *dev) "dev: %p"
> +vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
> +
> # virtio.c
> virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
> virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "vq %p elem %p len %u idx %u"
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 4580f3efd8a2..dc987704dcd9 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -20,6 +20,8 @@
> #include "hw/virtio/vhost-vdpa.h"
> #include "qemu/main-loop.h"
> #include "cpu.h"
> +#include "trace.h"
> +#include "qemu-common.h"
>
> static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
> {
> @@ -114,6 +116,9 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> section->offset_within_region +
> (iova - section->offset_within_address_space);
>
> + trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
> + vaddr, section->readonly);
> +
> llsize = int128_sub(llend, int128_make64(iova));
>
> ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
> @@ -170,6 +175,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> llend = int128_add(llend, section->size);
> llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
>
> + trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
> +
> if (int128_ge(int128_make64(iova), llend)) {
> return;
> }
> @@ -210,6 +217,7 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
> {
> uint8_t s;
>
> + trace_vhost_vdpa_add_status(dev, status);
> if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
> return;
> }
> @@ -224,6 +232,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
> struct vhost_vdpa *v;
> uint64_t features;
> assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> + trace_vhost_vdpa_init(dev, opaque);
>
> v = opaque;
> dev->opaque = opaque ;
> @@ -243,6 +252,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
> struct vhost_vdpa *v;
> assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> v = dev->opaque;
> + trace_vhost_vdpa_cleanup(dev, v);
> memory_listener_unregister(&v->listener);
>
> dev->opaque = NULL;
> @@ -251,13 +261,25 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
>
> static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
> {
> + trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
> return INT_MAX;
> }
>
> static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
> struct vhost_memory *mem)
> {
> -
> + trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
> + if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
> + trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
> + int i;
> + for (i = 0; i < mem->nregions; i++) {
> + trace_vhost_vdpa_dump_regions(dev, i,
> + mem->regions[i].guest_phys_addr,
> + mem->regions[i].memory_size,
> + mem->regions[i].userspace_addr,
> + mem->regions[i].flags_padding);
> + }
> + }
> if (mem->padding) {
> return -1;
> }
> @@ -269,6 +291,7 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
> uint64_t features)
> {
> int ret;
> + trace_vhost_vdpa_set_features(dev, features);
> ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
> uint8_t status = 0;
> if (ret) {
> @@ -283,26 +306,34 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
> int vhost_vdpa_get_device_id(struct vhost_dev *dev,
> uint32_t *device_id)
> {
> - return vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
> + int ret;
> + ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
> + trace_vhost_vdpa_get_device_id(dev, *device_id);
> + return ret;
> }
>
> static int vhost_vdpa_reset_device(struct vhost_dev *dev)
> {
> + int ret;
> uint8_t status = 0;
>
> - return vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
> + ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
> + trace_vhost_vdpa_reset_device(dev, status);
> + return ret;
> }
>
> static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
> {
> assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
>
> + trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
> return idx - dev->vq_index;
> }
>
> static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
> {
> int i;
> + trace_vhost_vdpa_set_vring_ready(dev);
> for (i = 0; i < dev->nvqs; ++i) {
> struct vhost_vring_state state = {
> .index = dev->vq_index + i,
> @@ -313,6 +344,19 @@ static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
> return 0;
> }
>
> +static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
> + uint32_t config_len)
> +{
> + int b, len;
> + char line[QEMU_HEXDUMP_LINE_LEN];
> +
> + for (b = 0; b < config_len; b += 16) {
> + len = config_len - b;
> + qemu_hexdump_line(line, b, config, len, false);
> + trace_vhost_vdpa_dump_config(dev, line);
> + }
> +}
> +
> static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
> uint32_t offset, uint32_t size,
> uint32_t flags)
> @@ -320,6 +364,7 @@ static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
> struct vhost_vdpa_config *config;
> int ret;
> unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
> + trace_vhost_vdpa_set_config(dev, offset, size, flags);
> config = g_malloc(size + config_size);
> if (config == NULL) {
> return -1;
> @@ -327,6 +372,10 @@ static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
> config->off = offset;
> config->len = size;
> memcpy(config->buf, data, size);
> + if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
> + trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
> + vhost_vdpa_dump_config(dev, data, size);
> + }
> ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
> g_free(config);
> return ret;
> @@ -339,6 +388,7 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
> unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
> int ret;
>
> + trace_vhost_vdpa_get_config(dev, config, config_len);
> v_config = g_malloc(config_len + config_size);
> if (v_config == NULL) {
> return -1;
> @@ -348,12 +398,17 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
> ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
> memcpy(config, v_config->buf, config_len);
> g_free(v_config);
> + if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
> + trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
> + vhost_vdpa_dump_config(dev, config, config_len);
> + }
> return ret;
> }
>
> static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> {
> struct vhost_vdpa *v = dev->opaque;
> + trace_vhost_vdpa_dev_start(dev, started);
> if (started) {
> uint8_t status = 0;
> memory_listener_register(&v->listener, &address_space_memory);
> @@ -375,53 +430,72 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
> struct vhost_log *log)
> {
> + trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
> + log->log);
> return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
> }
>
> static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
> struct vhost_vring_addr *addr)
> {
> + trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
> + addr->desc_user_addr, addr->used_user_addr,
> + addr->avail_user_addr,
> + addr->log_guest_addr);
> return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
> }
>
> static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
> struct vhost_vring_state *ring)
> {
> + trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
> return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
> }
>
> static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
> struct vhost_vring_state *ring)
> {
> + trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
> return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
> }
>
> static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
> struct vhost_vring_state *ring)
> {
> - return vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
> + int ret;
> +
> + ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
> + trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
> + return ret;
> }
>
> static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
> struct vhost_vring_file *file)
> {
> + trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
> return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
> }
>
> static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
> struct vhost_vring_file *file)
> {
> + trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
> return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
> }
>
> static int vhost_vdpa_get_features(struct vhost_dev *dev,
> uint64_t *features)
> {
> - return vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
> + int ret;
> +
> + ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
> + trace_vhost_vdpa_get_features(dev, *features);
> + return ret;
> }
>
> static int vhost_vdpa_set_owner(struct vhost_dev *dev)
> {
> + trace_vhost_vdpa_set_owner(dev);
> return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
> }
>
> @@ -432,6 +506,8 @@ static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
> addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
> addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
> addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
> + trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
> + addr->avail_user_addr, addr->used_user_addr);
> return 0;
> }
>