From: Kirti Wankhede <kwankhede@nvidia.com>
To: Alex Williamson <alex.williamson@redhat.com>
Cc: cjia@nvidia.com, zhi.wang.linux@gmail.com, aik@ozlabs.ru,
	Zhengxiao.zx@Alibaba-inc.com, shuangtai.tst@alibaba-inc.com,
	qemu-devel@nongnu.org, peterx@redhat.com, eauger@redhat.com,
	yi.l.liu@intel.com, quintela@redhat.com, ziye.yang@intel.com,
	armbru@redhat.com, mlevitsk@redhat.com, pasic@linux.ibm.com,
	felipe@nutanix.com, zhi.a.wang@intel.com, mcrossley@nvidia.com,
	kevin.tian@intel.com, yan.y.zhao@intel.com, eskultet@redhat.com,
	dgilbert@redhat.com, changpeng.liu@intel.com, cohuck@redhat.com,
	Ken.Xue@amd.com, jonathan.davies@nutanix.com,
	pbonzini@redhat.com, dnigam@nvidia.com
Subject: Re: [PATCH v27 14/17] vfio: Dirty page tracking when vIOMMU is enabled
Date: Fri, 23 Oct 2020 13:25:10 +0530
Message-ID: <e7cedc2e-232c-76c3-f192-57ebdb27abec@nvidia.com>
In-Reply-To: <20201022143710.6a11facc@w520.home>



On 10/23/2020 2:07 AM, Alex Williamson wrote:
> On Thu, 22 Oct 2020 16:42:04 +0530
> Kirti Wankhede <kwankhede@nvidia.com> wrote:
> 
>> When vIOMMU is enabled, register MAP notifier from log_sync when all
>> devices in container are in stop and copy phase of migration. Call replay
>> and get dirty pages from notifier callback.
>>
>> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
>> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
>> ---
>>   hw/vfio/common.c              | 95 ++++++++++++++++++++++++++++++++++++++++---
>>   hw/vfio/trace-events          |  1 +
>>   include/hw/vfio/vfio-common.h |  1 +
>>   3 files changed, 91 insertions(+), 6 deletions(-)
>>
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 2634387df948..98c2b1f9b190 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -442,8 +442,8 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
>>   }
>>   
>>   /* Called with rcu_read_lock held.  */
>> -static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
>> -                           bool *read_only)
>> +static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
>> +                               ram_addr_t *ram_addr, bool *read_only)
>>   {
>>       MemoryRegion *mr;
>>       hwaddr xlat;
>> @@ -474,8 +474,17 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
>>           return false;
>>       }
>>   
>> -    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
>> -    *read_only = !writable || mr->readonly;
>> +    if (vaddr) {
>> +        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
>> +    }
>> +
>> +    if (ram_addr) {
>> +        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
>> +    }
>> +
>> +    if (read_only) {
>> +        *read_only = !writable || mr->readonly;
>> +    }
>>   
>>       return true;
>>   }
>> @@ -485,7 +494,6 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>>       VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
>>       VFIOContainer *container = giommu->container;
>>       hwaddr iova = iotlb->iova + giommu->iommu_offset;
>> -    bool read_only;
>>       void *vaddr;
>>       int ret;
>>   
>> @@ -501,7 +509,9 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>>       rcu_read_lock();
>>   
>>       if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
>> -        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
>> +        bool read_only;
>> +
>> +        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
>>               goto out;
>>           }
>>           /*
>> @@ -899,11 +909,84 @@ err_out:
>>       return ret;
>>   }
>>   
>> +static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>> +{
>> +    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, dirty_notify);
>> +    VFIOContainer *container = giommu->container;
>> +    hwaddr iova = iotlb->iova + giommu->iommu_offset;
>> +    ram_addr_t translated_addr;
>> +
>> +    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
>> +
>> +    if (iotlb->target_as != &address_space_memory) {
>> +        error_report("Wrong target AS \"%s\", only system memory is allowed",
>> +                     iotlb->target_as->name ? iotlb->target_as->name : "none");
>> +        return;
>> +    }
>> +
>> +    rcu_read_lock();
>> +
>> +    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
>> +        int ret;
>> +
>> +        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
>> +                                    translated_addr);
>> +        if (ret) {
>> +            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
>> +                         "0x%"HWADDR_PRIx") = %d (%m)",
>> +                         container, iova,
>> +                         iotlb->addr_mask + 1, ret);
>> +        }
>> +    }
>> +
>> +    rcu_read_unlock();
>> +}
>> +
>>   static int vfio_sync_dirty_bitmap(VFIOContainer *container,
>>                                     MemoryRegionSection *section)
>>   {
>>       ram_addr_t ram_addr;
>>   
>> +    if (memory_region_is_iommu(section->mr)) {
>> +        VFIOGuestIOMMU *giommu;
>> +        int ret = 0;
>> +
>> +        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
>> +            if (MEMORY_REGION(giommu->iommu) == section->mr &&
>> +                giommu->n.start == section->offset_within_region) {
>> +                Int128 llend;
>> +                Error *err = NULL;
>> +                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
>> +                                                       MEMTXATTRS_UNSPECIFIED);
>> +
>> +                llend = int128_add(int128_make64(section->offset_within_region),
>> +                                   section->size);
>> +                llend = int128_sub(llend, int128_one());
>> +
>> +                iommu_notifier_init(&giommu->dirty_notify,
>> +                                    vfio_iommu_map_dirty_notify,
>> +                                    IOMMU_NOTIFIER_MAP,
>> +                                    section->offset_within_region,
>> +                                    int128_get64(llend),
>> +                                    idx);
>> +                ret = memory_region_register_iommu_notifier(section->mr,
>> +                                                  &giommu->dirty_notify, &err);
>> +                if (ret) {
>> +                    error_report_err(err);
>> +                    break;
>> +                }
>> +
>> +                memory_region_iommu_replay(giommu->iommu,
>> +                                           &giommu->dirty_notify);
>> +
>> +                memory_region_unregister_iommu_notifier(section->mr,
>> +                                                        &giommu->dirty_notify);
> 
> 
> Is it necessary to do the register/unregister?  It seemed to me that
> perhaps we could do a replay independent of those.
> 

Earlier I thought that to do a replay we need to register the notifier.
But you are right, I verified that replay works without registering.
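
So the hunk above can drop the register/unregister pair and keep only the 
init and replay calls, roughly like this (just a sketch, all names taken 
from the v27 patch above):

    iommu_notifier_init(&giommu->dirty_notify, vfio_iommu_map_dirty_notify,
                        IOMMU_NOTIFIER_MAP, section->offset_within_region,
                        int128_get64(llend), idx);
    /* replay walks the current mappings and fires the MAP notifier for
     * each of them even though the notifier is never registered */
    memory_region_iommu_replay(giommu->iommu, &giommu->dirty_notify);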

> I'd also be tempted to move dirty_notify to a temporary object rather
> than store it on the giommu for such a brief usage, ie. define:
> 
> struct giommu_dirty_notifier {
>      IOMMUNotifier n;
>      VFIOGuestIOMMU *giommu;
> };
> 
> struct giommu_dirty_notifier dnotifier = { .giommu = giommu };
> 
> iommu_notifier_init(&dnotifier.n,...);
> 
> memory_region_iommu_replay(giommu->iommu, &dnotifier.n);
> ...
> 
> struct giommu_dirty_notifier *ndnotifier = container_of(n, struct giommu_dirty_notifier, n);
> VFIOGuestIOMMU *giommu = ndnotifier->giommu;
> 
> It's nice that we remove the extra bloat of the list/tree entirely with
> this approach.  Thanks,
> 

Thanks for your suggestion. I will change it as you suggested above.
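
Roughly, I am thinking of something along these lines for the next version 
(sketch only, combining both points; the struct and variable names here are 
placeholders and may still change):

typedef struct giommu_dirty_notifier {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* recover the giommu from the embedded notifier */
    giommu_dirty_notifier *dnotifier = container_of(n, giommu_dirty_notifier, n);
    VFIOGuestIOMMU *giommu = dnotifier->giommu;
    ...
}

/* in vfio_sync_dirty_bitmap(), for the giommu matching the section: */
giommu_dirty_notifier dnotifier = { .giommu = giommu };

iommu_notifier_init(&dnotifier.n, vfio_iommu_map_dirty_notify,
                    IOMMU_NOTIFIER_MAP, section->offset_within_region,
                    int128_get64(llend), idx);
/* replay the current mappings through the temporary notifier; no
 * register/unregister and nothing new stored on the giommu */
memory_region_iommu_replay(giommu->iommu, &dnotifier.n);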

Thanks,
Kirti



Thread overview: 36+ messages
2020-10-22 11:11 [PATCH v27 00/17] Add migration support for VFIO devices Kirti Wankhede
2020-10-22 11:11 ` [PATCH v27 01/17] vfio: Add function to unmap VFIO region Kirti Wankhede
2020-10-22 11:11 ` [PATCH v27 02/17] vfio: Add vfio_get_object callback to VFIODeviceOps Kirti Wankhede
2020-10-22 11:11 ` [PATCH v27 03/17] vfio: Add save and load functions for VFIO PCI devices Kirti Wankhede
2020-10-22 14:06   ` Alex Williamson
2020-10-22 15:52     ` Kirti Wankhede
2020-10-22 11:11 ` [PATCH v27 04/17] vfio: Add migration region initialization and finalize function Kirti Wankhede
2020-10-22 14:22   ` Alex Williamson
2020-10-22 16:16     ` Kirti Wankhede
2020-10-23 11:17   ` Cornelia Huck
2020-10-22 11:11 ` [PATCH v27 05/17] vfio: Add VM state change handler to know state of VM Kirti Wankhede
2020-10-22 16:35   ` Alex Williamson
2020-10-22 17:41     ` Kirti Wankhede
2020-10-22 18:29       ` Alex Williamson
2020-10-22 11:11 ` [PATCH v27 06/17] vfio: Add migration state change notifier Kirti Wankhede
2020-10-22 11:11 ` [PATCH v27 07/17] vfio: Register SaveVMHandlers for VFIO device Kirti Wankhede
2020-10-22 18:51   ` Alex Williamson
2020-10-23  7:12     ` Kirti Wankhede
2020-10-22 11:11 ` [PATCH v27 08/17] vfio: Add save state functions to SaveVMHandlers Kirti Wankhede
2020-10-22 11:11 ` [PATCH v27 09/17] vfio: Add load " Kirti Wankhede
2020-10-22 19:50   ` Alex Williamson
2020-10-23  9:59     ` Kirti Wankhede
2020-10-22 11:12 ` [PATCH v27 10/17] memory: Set DIRTY_MEMORY_MIGRATION when IOMMU is enabled Kirti Wankhede
2020-10-22 19:52   ` Alex Williamson
2020-10-22 11:12 ` [PATCH v27 11/17] vfio: Get migration capability flags for container Kirti Wankhede
2020-10-22 11:12 ` [PATCH v27 12/17] vfio: Add function to start and stop dirty pages tracking Kirti Wankhede
2020-10-22 11:12 ` [PATCH v27 13/17] vfio: Add vfio_listener_log_sync to mark dirty pages Kirti Wankhede
2020-10-22 11:12 ` [PATCH v27 14/17] vfio: Dirty page tracking when vIOMMU is enabled Kirti Wankhede
2020-10-22 20:37   ` Alex Williamson
2020-10-23  7:55     ` Kirti Wankhede [this message]
2020-10-22 11:12 ` [PATCH v27 15/17] vfio: Add ioctl to get dirty pages bitmap during dma unmap Kirti Wankhede
2020-10-22 11:12 ` [PATCH v27 16/17] vfio: Make vfio-pci device migration capable Kirti Wankhede
2020-10-22 11:12 ` [PATCH v27 17/17] qapi: Add VFIO devices migration stats in Migration stats Kirti Wankhede
2020-10-22 22:18   ` Alex Williamson
2020-10-23 10:21     ` Kirti Wankhede
2020-10-22 21:28 ` [PATCH v27 00/17] Add migration support for VFIO devices Alex Williamson
