
Re: [PATCH v3 04/13] vfio/common: Add VFIOBitmap and alloc function


From: Cédric Le Goater
Subject: Re: [PATCH v3 04/13] vfio/common: Add VFIOBitmap and alloc function
Date: Mon, 6 Mar 2023 14:20:49 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Thunderbird/102.8.0

On 3/4/23 02:43, Joao Martins wrote:
From: Avihai Horon <avihaih@nvidia.com>

There are already two places where dirty page bitmap allocation and
calculations are done in open code. With device dirty page tracking
being added in the next patches, there are going to be even more places.

To avoid code duplication, introduce a VFIOBitmap struct and a
corresponding alloc function, and use them where applicable.

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>

One minor comment, only in case you respin,

Reviewed-by: Cédric Le Goater <clg@redhat.com>

Thanks,

C.

---
  hw/vfio/common.c | 75 +++++++++++++++++++++++++++++-------------------
  1 file changed, 46 insertions(+), 29 deletions(-)

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 4c801513136a..151e7f40b73d 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -320,6 +320,27 @@ const MemoryRegionOps vfio_region_ops = {
   * Device state interfaces
   */

+typedef struct {
+    unsigned long *bitmap;
+    hwaddr size;
+    hwaddr pages;
+} VFIOBitmap;
+
+static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
+{
+    vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
+    vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
+                                         BITS_PER_BYTE;
+    vbmap->bitmap = g_try_malloc0(vbmap->size);
+    if (!vbmap->bitmap) {
+        errno = ENOMEM;
+
+        return -errno;

vfio_bitmap_alloc() could simply return -ENOMEM now.
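i.e. something like (a sketch of the suggestion, untested):

    vbmap->bitmap = g_try_malloc0(vbmap->size);
    if (!vbmap->bitmap) {
        return -ENOMEM;
    }

The callers could then propagate ret directly instead of re-reading errno.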

+    }
+
+    return 0;
+}
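
Also, a quick sanity check of the size math here (my numbers, assuming
4 KiB host pages): for a 1 GiB range, pages = 0x40000000 / 4096 = 262144,
and size = ROUND_UP(262144, 64) / 8 = 32768 bytes, i.e. a 32 KiB bitmap
with one bit per host page, padded to a multiple of __u64. Looks right.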
+
  bool vfio_mig_active(void)
  {
      VFIOGroup *group;
@@ -468,9 +489,14 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
  {
      struct vfio_iommu_type1_dma_unmap *unmap;
      struct vfio_bitmap *bitmap;
-    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
+    VFIOBitmap vbmap;
      int ret;

+    ret = vfio_bitmap_alloc(&vbmap, size);
+    if (ret) {
+        return -errno;
+    }
+
      unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

      unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
@@ -484,35 +510,28 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
       * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
       * to qemu_real_host_page_size.
       */
-
      bitmap->pgsize = qemu_real_host_page_size();
-    bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
-                   BITS_PER_BYTE;
+    bitmap->size = vbmap.size;
+    bitmap->data = (__u64 *)vbmap.bitmap;

-    if (bitmap->size > container->max_dirty_bitmap_size) {
-        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
-                     (uint64_t)bitmap->size);
+    if (vbmap.size > container->max_dirty_bitmap_size) {
+        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
          ret = -E2BIG;
          goto unmap_exit;
      }

-    bitmap->data = g_try_malloc0(bitmap->size);
-    if (!bitmap->data) {
-        ret = -ENOMEM;
-        goto unmap_exit;
-    }
-
      ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
      if (!ret) {
-        cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
-                iotlb->translated_addr, pages);
+        cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
+                iotlb->translated_addr, vbmap.pages);
      } else {
          error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
      }

-    g_free(bitmap->data);
  unmap_exit:
      g_free(unmap);
+    g_free(vbmap.bitmap);
+
      return ret;
  }
@@ -1329,7 +1348,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
  {
      struct vfio_iommu_type1_dirty_bitmap *dbitmap;
      struct vfio_iommu_type1_dirty_bitmap_get *range;
-    uint64_t pages;
+    VFIOBitmap vbmap;
      int ret;

      if (!container->dirty_pages_supported) {
@@ -1339,6 +1358,11 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
          return 0;
      }

+    ret = vfio_bitmap_alloc(&vbmap, size);
+    if (ret) {
+        return -errno;
+    }
+
      dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

      dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
@@ -1353,15 +1377,8 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
       * to qemu_real_host_page_size.
       */
      range->bitmap.pgsize = qemu_real_host_page_size();
-
-    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size();
-    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
-                                         BITS_PER_BYTE;
-    range->bitmap.data = g_try_malloc0(range->bitmap.size);
-    if (!range->bitmap.data) {
-        ret = -ENOMEM;
-        goto err_out;
-    }
+    range->bitmap.size = vbmap.size;
+    range->bitmap.data = (__u64 *)vbmap.bitmap;

      ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
      if (ret) {
@@ -1372,14 +1389,14 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
          goto err_out;
      }

-    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
-                                            ram_addr, pages);
+    cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
+                                           vbmap.pages);

      trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
                                  range->bitmap.size, ram_addr);
  err_out:
-    g_free(range->bitmap.data);
      g_free(dbitmap);
+    g_free(vbmap.bitmap);

      return ret;
  }
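
For completeness, the resulting pattern at both call sites now reads roughly
like this (a condensed sketch, not the exact code above; `addr` stands in
for the respective translated address):

    VFIOBitmap vbmap;
    int ret;

    ret = vfio_bitmap_alloc(&vbmap, size);
    if (ret) {
        return ret;
    }

    /* point the ioctl argument at vbmap.bitmap / vbmap.size ... */

    cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, addr, vbmap.pages);
    g_free(vbmap.bitmap);

which is exactly the duplication this patch factors out.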



