+ if (!vbmap) {
+ errno = ENOMEM;
+
+ return NULL;
+ }
+
+ vbmap->pages = REAL_HOST_PAGE_ALIGN(size) /
qemu_real_host_page_size();
+ vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) *
BITS_PER_BYTE) /
+ BITS_PER_BYTE;
+ vbmap->bitmap = g_try_malloc0(vbmap->size);
+ if (!vbmap->bitmap) {
+ g_free(vbmap);
+ errno = ENOMEM;
+
+ return NULL;
+ }
+
+ return vbmap;
+}
+
+/*
+ * Free a VFIOBitmap created by vfio_bitmap_alloc(): releases the dirty-page
+ * bitmap buffer first, then the container struct itself.  g_free() tolerates
+ * NULL, so a partially constructed vbmap (bitmap == NULL) is also safe here.
+ */
+static void vfio_bitmap_dealloc(VFIOBitmap *vbmap)
+{
+ g_free(vbmap->bitmap);
+ g_free(vbmap);
+}
+
bool vfio_mig_active(void)
{
VFIOGroup *group;
@@ -470,9 +505,14 @@ static int vfio_dma_unmap_bitmap(VFIOContainer
*container,
{
struct vfio_iommu_type1_dma_unmap *unmap;
struct vfio_bitmap *bitmap;
- uint64_t pages = REAL_HOST_PAGE_ALIGN(size) /
qemu_real_host_page_size();
+ VFIOBitmap *vbmap;
int ret;
+ vbmap = vfio_bitmap_alloc(size);
+ if (!vbmap) {
+ return -errno;
+ }
+
unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
@@ -486,35 +526,28 @@ static int vfio_dma_unmap_bitmap(VFIOContainer
*container,
* qemu_real_host_page_size to mark those dirty. Hence set
bitmap_pgsize
* to qemu_real_host_page_size.
*/
-
bitmap->pgsize = qemu_real_host_page_size();
- bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
- BITS_PER_BYTE;
+ bitmap->size = vbmap->size;
+ bitmap->data = (__u64 *)vbmap->bitmap;
- if (bitmap->size > container->max_dirty_bitmap_size) {
- error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
- (uint64_t)bitmap->size);
+ if (vbmap->size > container->max_dirty_bitmap_size) {
+ error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
vbmap->size);
ret = -E2BIG;
goto unmap_exit;
}
- bitmap->data = g_try_malloc0(bitmap->size);
- if (!bitmap->data) {
- ret = -ENOMEM;
- goto unmap_exit;
- }
-
ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
if (!ret) {
- cpu_physical_memory_set_dirty_lebitmap((unsigned long
*)bitmap->data,
- iotlb->translated_addr, pages);
+ cpu_physical_memory_set_dirty_lebitmap(vbmap->bitmap,
+ iotlb->translated_addr, vbmap->pages);
} else {
error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
}
- g_free(bitmap->data);
unmap_exit:
g_free(unmap);
+ vfio_bitmap_dealloc(vbmap);
+
return ret;
}
@@ -1331,7 +1364,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer
*container, uint64_t iova,
{
struct vfio_iommu_type1_dirty_bitmap *dbitmap;
struct vfio_iommu_type1_dirty_bitmap_get *range;
- uint64_t pages;
+ VFIOBitmap *vbmap;
int ret;
if (!container->dirty_pages_supported) {
@@ -1341,6 +1374,11 @@ static int vfio_get_dirty_bitmap(VFIOContainer
*container, uint64_t iova,
return 0;
}
+ vbmap = vfio_bitmap_alloc(size);
+ if (!vbmap) {
+ return -errno;
+ }
+
dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
@@ -1355,15 +1393,8 @@ static int vfio_get_dirty_bitmap(VFIOContainer
*container, uint64_t iova,
* to qemu_real_host_page_size.
*/
range->bitmap.pgsize = qemu_real_host_page_size();
-
- pages = REAL_HOST_PAGE_ALIGN(range->size) /
qemu_real_host_page_size();
- range->bitmap.size = ROUND_UP(pages, sizeof(__u64) *
BITS_PER_BYTE) /
- BITS_PER_BYTE;
- range->bitmap.data = g_try_malloc0(range->bitmap.size);
- if (!range->bitmap.data) {
- ret = -ENOMEM;
- goto err_out;
- }
+ range->bitmap.size = vbmap->size;
+ range->bitmap.data = (__u64 *)vbmap->bitmap;
ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
if (ret) {
@@ -1374,14 +1405,14 @@ static int
vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
goto err_out;
}
- cpu_physical_memory_set_dirty_lebitmap((unsigned long
*)range->bitmap.data,
- ram_addr, pages);
+ cpu_physical_memory_set_dirty_lebitmap(vbmap->bitmap, ram_addr,
+ vbmap->pages);
trace_vfio_get_dirty_bitmap(container->fd, range->iova,
range->size,
range->bitmap.size, ram_addr);
err_out:
- g_free(range->bitmap.data);
g_free(dbitmap);
+ vfio_bitmap_dealloc(vbmap);
return ret;
}