[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 04/17] vfio/common: Add VFIOBitmap and alloc function
From: Alex Williamson
Subject: [PULL 04/17] vfio/common: Add VFIOBitmap and alloc function
Date: Tue, 07 Mar 2023 11:52:40 -0700
User-agent: StGit/1.5.dev2+g9ce680a52bd9
From: Avihai Horon <avihaih@nvidia.com>
There are already two places where dirty page bitmap allocation and
calculations are done in open code.
To avoid code duplication, introduce VFIOBitmap struct and corresponding
alloc function and use them where applicable.
Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Link: https://lore.kernel.org/r/20230307125450.62409-5-joao.m.martins@oracle.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
hw/vfio/common.c | 73 +++++++++++++++++++++++++++++++++---------------------
1 file changed, 44 insertions(+), 29 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 4c801513136a..cec3de08d2b4 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -320,6 +320,25 @@ const MemoryRegionOps vfio_region_ops = {
* Device state interfaces
*/
+typedef struct {
+ unsigned long *bitmap;
+ hwaddr size;
+ hwaddr pages;
+} VFIOBitmap;
+
+static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
+{
+ vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
+ vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
+ BITS_PER_BYTE;
+ vbmap->bitmap = g_try_malloc0(vbmap->size);
+ if (!vbmap->bitmap) {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
bool vfio_mig_active(void)
{
VFIOGroup *group;
@@ -468,9 +487,14 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
{
struct vfio_iommu_type1_dma_unmap *unmap;
struct vfio_bitmap *bitmap;
- uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
+ VFIOBitmap vbmap;
int ret;
+ ret = vfio_bitmap_alloc(&vbmap, size);
+ if (ret) {
+ return ret;
+ }
+
unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
@@ -484,35 +508,28 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
* qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
* to qemu_real_host_page_size.
*/
-
bitmap->pgsize = qemu_real_host_page_size();
- bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
- BITS_PER_BYTE;
+ bitmap->size = vbmap.size;
+ bitmap->data = (__u64 *)vbmap.bitmap;
- if (bitmap->size > container->max_dirty_bitmap_size) {
- error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
- (uint64_t)bitmap->size);
+ if (vbmap.size > container->max_dirty_bitmap_size) {
+ error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
ret = -E2BIG;
goto unmap_exit;
}
- bitmap->data = g_try_malloc0(bitmap->size);
- if (!bitmap->data) {
- ret = -ENOMEM;
- goto unmap_exit;
- }
-
ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
if (!ret) {
- cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
- iotlb->translated_addr, pages);
+ cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
+ iotlb->translated_addr, vbmap.pages);
} else {
error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
}
- g_free(bitmap->data);
unmap_exit:
g_free(unmap);
+ g_free(vbmap.bitmap);
+
return ret;
}
@@ -1329,7 +1346,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
{
struct vfio_iommu_type1_dirty_bitmap *dbitmap;
struct vfio_iommu_type1_dirty_bitmap_get *range;
- uint64_t pages;
+ VFIOBitmap vbmap;
int ret;
if (!container->dirty_pages_supported) {
@@ -1339,6 +1356,11 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
return 0;
}
+ ret = vfio_bitmap_alloc(&vbmap, size);
+ if (ret) {
+ return ret;
+ }
+
dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
@@ -1353,15 +1375,8 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
* to qemu_real_host_page_size.
*/
range->bitmap.pgsize = qemu_real_host_page_size();
-
- pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size();
- range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
- BITS_PER_BYTE;
- range->bitmap.data = g_try_malloc0(range->bitmap.size);
- if (!range->bitmap.data) {
- ret = -ENOMEM;
- goto err_out;
- }
+ range->bitmap.size = vbmap.size;
+ range->bitmap.data = (__u64 *)vbmap.bitmap;
ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
if (ret) {
@@ -1372,14 +1387,14 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
goto err_out;
}
- cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
- ram_addr, pages);
+ cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
+ vbmap.pages);
trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
range->bitmap.size, ram_addr);
err_out:
- g_free(range->bitmap.data);
g_free(dbitmap);
+ g_free(vbmap.bitmap);
return ret;
}
- [PULL 00/17] VFIO updates for 8.0, Alex Williamson, 2023/03/07
- [PULL 01/17] vfio/common: Fix error reporting in vfio_get_dirty_bitmap(), Alex Williamson, 2023/03/07
- [PULL 02/17] vfio/common: Fix wrong %m usages, Alex Williamson, 2023/03/07
- [PULL 03/17] vfio/common: Abort migration if dirty log start/stop/sync fails, Alex Williamson, 2023/03/07
- [PULL 04/17] vfio/common: Add VFIOBitmap and alloc function, Alex Williamson <=
- [PULL 05/17] vfio/common: Add helper to validate iova/end against hostwin, Alex Williamson, 2023/03/07
- [PULL 06/17] vfio/common: Use a single tracepoint for skipped sections, Alex Williamson, 2023/03/07
- [PULL 07/17] vfio/common: Consolidate skip/invalid section into helper, Alex Williamson, 2023/03/07
- [PULL 08/17] vfio/common: Add helper to consolidate iova/end calculation, Alex Williamson, 2023/03/07
- [PULL 09/17] vfio/common: Record DMA mapped IOVA ranges, Alex Williamson, 2023/03/07
- [PULL 10/17] vfio/common: Add device dirty page tracking start/stop, Alex Williamson, 2023/03/07
- [PULL 11/17] vfio/common: Extract code from vfio_get_dirty_bitmap() to new function, Alex Williamson, 2023/03/07
- [PULL 12/17] vfio/common: Add device dirty page bitmap sync, Alex Williamson, 2023/03/07
- [PULL 13/17] vfio/migration: Block migration with vIOMMU, Alex Williamson, 2023/03/07
- [PULL 14/17] vfio/migration: Query device dirty page tracking support, Alex Williamson, 2023/03/07