From: Fam Zheng <famz@redhat.com>
To: Gonglei <arei.gonglei@huawei.com>
Cc: qemu-devel@nongnu.org, pbonzini@redhat.com, peter.huangpeng@huawei.com
Subject: Re: [Qemu-devel] [PATCH 3/3] memory: pass RAMBlock as a agrument
Date: Fri, 29 Apr 2016 16:06:39 +0800 [thread overview]
Message-ID: <20160429080639.GP1421@ad.usersys.redhat.com> (raw)
In-Reply-To: <1461123261-15408-4-git-send-email-arei.gonglei@huawei.com>
On Wed, 04/20 11:34, Gonglei wrote:
> In this way, we can avoid invoking qemu_get_ram_block()
> most of the time, which can save CPU cycles.
>
> Signed-off-by: Gonglei <arei.gonglei@huawei.com>
> ---
> exec.c | 48 ++++++++++++++++++++++++++++++------------------
> hw/misc/ivshmem.c | 6 ++++--
> hw/virtio/vhost-user.c | 11 ++++++-----
> include/exec/ram_addr.h | 6 +++---
> memory.c | 3 ++-
> 5 files changed, 45 insertions(+), 29 deletions(-)
>
> diff --git a/exec.c b/exec.c
> index b0ecbcd..f3348aa 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -1813,37 +1813,49 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
> }
> #endif /* !_WIN32 */
>
> -int qemu_get_ram_fd(ram_addr_t addr)
> +int qemu_get_ram_fd(RAMBlock *ram_block, ram_addr_t addr)
> {
> - RAMBlock *block;
> + RAMBlock *block = ram_block;
> int fd;
>
> - rcu_read_lock();
> - block = qemu_get_ram_block(addr);
> - fd = block->fd;
> - rcu_read_unlock();
> + if (block == NULL) {
Is this branch even used? It seems no caller passes in literal NULL in this
patch. Can the variables be NULL?
If not, I think we can simply drop addr.
> + rcu_read_lock();
> + block = qemu_get_ram_block(addr);
> + fd = block->fd;
> + rcu_read_unlock();
> + } else {
> + fd = block->fd;
> + }
> return fd;
> }
>
> -void qemu_set_ram_fd(ram_addr_t addr, int fd)
> +void qemu_set_ram_fd(RAMBlock *ram_block, ram_addr_t addr, int fd)
> {
> - RAMBlock *block;
> + RAMBlock *block = ram_block;
>
> - rcu_read_lock();
> - block = qemu_get_ram_block(addr);
> - block->fd = fd;
> - rcu_read_unlock();
> + if (block == NULL) {
Same here...
> + rcu_read_lock();
> + block = qemu_get_ram_block(addr);
> + block->fd = fd;
> + rcu_read_unlock();
> + } else {
> + block->fd = fd;
> + }
> }
>
> -void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
> +void *qemu_get_ram_block_host_ptr(RAMBlock *ram_block, ram_addr_t addr)
> {
> - RAMBlock *block;
> + RAMBlock *block = ram_block;
> void *ptr;
>
> - rcu_read_lock();
> - block = qemu_get_ram_block(addr);
> - ptr = ramblock_ptr(block, 0);
> - rcu_read_unlock();
> + if (block == NULL) {
And here.
> + rcu_read_lock();
> + block = qemu_get_ram_block(addr);
> + ptr = ramblock_ptr(block, 0);
> + rcu_read_unlock();
> + } else {
> + ptr = ramblock_ptr(block, 0);
> + }
> return ptr;
> }
>
> diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
> index e40f23b..5ba0af7 100644
> --- a/hw/misc/ivshmem.c
> +++ b/hw/misc/ivshmem.c
> @@ -533,7 +533,8 @@ static void process_msg_shmem(IVShmemState *s, int fd, Error **errp)
> }
> memory_region_init_ram_ptr(&s->server_bar2, OBJECT(s),
> "ivshmem.bar2", size, ptr);
> - qemu_set_ram_fd(memory_region_get_ram_addr(&s->server_bar2), fd);
> + qemu_set_ram_fd(s->server_bar2.ram_block,
> + memory_region_get_ram_addr(&s->server_bar2), fd);
> s->ivshmem_bar2 = &s->server_bar2;
> }
>
> @@ -940,7 +941,8 @@ static void ivshmem_exit(PCIDevice *dev)
> strerror(errno));
> }
>
> - fd = qemu_get_ram_fd(memory_region_get_ram_addr(s->ivshmem_bar2));
> + fd = qemu_get_ram_fd(s->ivshmem_bar2->ram_block,
> + memory_region_get_ram_addr(s->ivshmem_bar2));
> close(fd);
> }
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 5914e85..4b93533 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -248,17 +248,18 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
> for (i = 0; i < dev->mem->nregions; ++i) {
> struct vhost_memory_region *reg = dev->mem->regions + i;
> ram_addr_t ram_addr;
> + MemoryRegion *mr;
>
> assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> - qemu_ram_addr_from_host((void *)(uintptr_t)reg->userspace_addr,
> + mr = qemu_ram_addr_from_host((void *)(uintptr_t)reg->userspace_addr,
> &ram_addr);
> - fd = qemu_get_ram_fd(ram_addr);
> + fd = qemu_get_ram_fd(mr->ram_block, ram_addr);
> if (fd > 0) {
> msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
> msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
> msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
> msg.payload.memory.regions[fd_num].mmap_offset = reg->userspace_addr -
> - (uintptr_t) qemu_get_ram_block_host_ptr(ram_addr);
> + (uintptr_t) qemu_get_ram_block_host_ptr(mr->ram_block, ram_addr);
> assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
> fds[fd_num++] = fd;
> }
> @@ -622,11 +623,11 @@ static bool vhost_user_can_merge(struct vhost_dev *dev,
>
> mr = qemu_ram_addr_from_host((void *)(uintptr_t)start1, &ram_addr);
> assert(mr);
> - mfd = qemu_get_ram_fd(ram_addr);
> + mfd = qemu_get_ram_fd(mr->ram_block, ram_addr);
>
> mr = qemu_ram_addr_from_host((void *)(uintptr_t)start2, &ram_addr);
> assert(mr);
> - rfd = qemu_get_ram_fd(ram_addr);
> + rfd = qemu_get_ram_fd(mr->ram_block, ram_addr);
>
> return mfd == rfd;
> }
> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
> index 5b6e1b8..4e1e5c3 100644
> --- a/include/exec/ram_addr.h
> +++ b/include/exec/ram_addr.h
> @@ -105,9 +105,9 @@ RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
> uint64_t length,
> void *host),
> MemoryRegion *mr, Error **errp);
> -int qemu_get_ram_fd(ram_addr_t addr);
> -void qemu_set_ram_fd(ram_addr_t addr, int fd);
> -void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
> +int qemu_get_ram_fd(RAMBlock *ram_block, ram_addr_t addr);
> +void qemu_set_ram_fd(RAMBlock *ram_block, ram_addr_t addr, int fd);
> +void *qemu_get_ram_block_host_ptr(RAMBlock *ram_block, ram_addr_t addr);
> void qemu_ram_free(RAMBlock *block);
>
> int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
> diff --git a/memory.c b/memory.c
> index 239e6da..fa2b6e2 100644
> --- a/memory.c
> +++ b/memory.c
> @@ -1641,7 +1641,8 @@ int memory_region_get_fd(MemoryRegion *mr)
>
> assert(mr->ram_block);
>
> - return qemu_get_ram_fd(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
> + return qemu_get_ram_fd(mr->ram_block,
> + memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
> }
>
> void *memory_region_get_ram_ptr(MemoryRegion *mr)
> --
> 1.7.12.4
>
>
next prev parent reply other threads:[~2016-04-29 8:43 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-04-20 3:34 [Qemu-devel] [PATCH 0/3] memory: some little optimzation Gonglei
2016-04-20 3:34 ` [Qemu-devel] [PATCH 1/3] memory: drop find_ram_block() Gonglei
2016-04-29 7:48 ` Fam Zheng
2016-04-29 8:57 ` Gonglei (Arei)
2016-04-20 3:34 ` [Qemu-devel] [PATCH 2/3] exec: adjuest rcu_read_lock requiement Gonglei
2016-04-29 7:56 ` Fam Zheng
2016-04-29 8:59 ` Gonglei (Arei)
2016-04-20 3:34 ` [Qemu-devel] [PATCH 3/3] memory: pass RAMBlock as a agrument Gonglei
2016-04-29 8:06 ` Fam Zheng [this message]
2016-04-29 9:03 ` Gonglei (Arei)
2016-05-09 11:41 ` [Qemu-devel] [PATCH 0/3] memory: some little optimzation Paolo Bonzini
2016-05-10 2:05 ` Gonglei (Arei)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20160429080639.GP1421@ad.usersys.redhat.com \
--to=famz@redhat.com \
--cc=arei.gonglei@huawei.com \
--cc=pbonzini@redhat.com \
--cc=peter.huangpeng@huawei.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).