From: Nikolay Borisov
Subject: [PATCH] migration/multifd: Remove redundant copy of page offsets during send
Date: Wed, 10 Aug 2022 13:39:42 +0300

All pages which are going to be migrated are first added to the
MultiFDSendParams::pages->offset array by the main migration thread and
are subsequently copied into MultiFDSendParams::normal by the multifd
send thread. This copy is unnecessary, since MultiFDPages_t is
guaranteed to be stable while the channel's mutex (p->mutex) is held.
Removing it also simplifies the code somewhat, because the pages being
sent are now kept in only one place, and the name 'offset', together
with the context in which it is used (it is usually added to the host
page address), makes it obvious that the value is an offset.

With this change normal/normal_num are no longer used in the multifd
send path.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
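
Notes (not part of the commit message): for reviewers less familiar with
the multifd structures, an abridged sketch of the fields this patch relies
on is below. The field list is trimmed and the comments are mine; see
migration/multifd.h for the authoritative definitions and types.

    /* Abridged sketch of MultiFDPages_t, not the full definition. */
    typedef struct {
        uint32_t num;        /* number of pages queued for this packet  */
        uint32_t allocated;  /* number of pages allocated               */
        ram_addr_t *offset;  /* offset of each page within its RAMBlock */
        RAMBlock *block;     /* RAMBlock the queued pages belong to     */
    } MultiFDPages_t;

The send_prepare() hooks run while the channel's p->mutex is still held,
so they can read p->pages->offset[] and p->pages->num directly instead of
going through the p->normal[]/p->normal_num staging copy that this patch
removes.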
 migration/multifd-zlib.c |  6 +++---
 migration/multifd-zstd.c |  6 +++---
 migration/multifd.c      | 25 ++++++++++---------------
 3 files changed, 16 insertions(+), 21 deletions(-)

diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index 18213a951302..363b64e95922 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -122,11 +122,11 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
     int ret;
     uint32_t i;

-    for (i = 0; i < p->normal_num; i++) {
+    for (i = 0; i < p->pages->num; i++) {
         uint32_t available = z->zbuff_len - out_size;
         int flush = Z_NO_FLUSH;

-        if (i == p->normal_num - 1) {
+        if (i == p->pages->num - 1) {
             flush = Z_SYNC_FLUSH;
         }

@@ -135,7 +135,7 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
          * with compression. zlib does not guarantee that this is safe,
          * therefore copy the page before calling deflate().
          */
-        memcpy(z->buf, p->pages->block->host + p->normal[i], page_size);
+        memcpy(z->buf, p->pages->block->host + p->pages->offset[i], page_size);
         zs->avail_in = page_size;
         zs->next_in = z->buf;

diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
index d788d309f22e..4daec8366727 100644
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -121,13 +121,13 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
     z->out.size = z->zbuff_len;
     z->out.pos = 0;

-    for (i = 0; i < p->normal_num; i++) {
+    for (i = 0; i < p->pages->num; i++) {
         ZSTD_EndDirective flush = ZSTD_e_continue;

-        if (i == p->normal_num - 1) {
+        if (i == p->pages->num - 1) {
             flush = ZSTD_e_flush;
         }
-        z->in.src = p->pages->block->host + p->normal[i];
+        z->in.src = p->pages->block->host + p->pages->offset[i];
         z->in.size = page_size;
         z->in.pos = 0;

diff --git a/migration/multifd.c b/migration/multifd.c
index 586ddc9d657a..d70662406490 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -89,13 +89,13 @@ static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
     MultiFDPages_t *pages = p->pages;
     size_t page_size = qemu_target_page_size();

-    for (int i = 0; i < p->normal_num; i++) {
-        p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i];
+    for (int i = 0; i < pages->num; i++) {
+        p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
         p->iov[p->iovs_num].iov_len = page_size;
         p->iovs_num++;
     }

-    p->next_packet_size = p->normal_num * page_size;
+    p->next_packet_size = pages->num * page_size;
     p->flags |= MULTIFD_FLAG_NOCOMP;
     return 0;
 }
@@ -262,7 +262,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)

     packet->flags = cpu_to_be32(p->flags);
     packet->pages_alloc = cpu_to_be32(p->pages->allocated);
-    packet->normal_pages = cpu_to_be32(p->normal_num);
+    packet->normal_pages = cpu_to_be32(p->pages->num);
     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
     packet->packet_num = cpu_to_be64(p->packet_num);

@@ -270,9 +270,9 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
         strncpy(packet->ramblock, p->pages->block->idstr, 256);
     }

-    for (i = 0; i < p->normal_num; i++) {
+    for (i = 0; i < p->pages->num; i++) {
         /* there are architectures where ram_addr_t is 32 bit */
-        uint64_t temp = p->normal[i];
+        uint64_t temp = p->pages->offset[i];

         packet->offset[i] = cpu_to_be64(temp);
     }
@@ -668,7 +668,7 @@ static void *multifd_send_thread(void *opaque)
         if (p->pending_job) {
             uint64_t packet_num = p->packet_num;
             uint32_t flags = p->flags;
-            p->normal_num = 0;
+            uint32_t num_pages = p->pages->num;

             if (use_zero_copy_send) {
                 p->iovs_num = 0;
@@ -676,12 +676,7 @@ static void *multifd_send_thread(void *opaque)
                 p->iovs_num = 1;
             }

-            for (int i = 0; i < p->pages->num; i++) {
-                p->normal[p->normal_num] = p->pages->offset[i];
-                p->normal_num++;
-            }
-
-            if (p->normal_num) {
+            if (num_pages) {
                 ret = multifd_send_state->ops->send_prepare(p, &local_err);
                 if (ret != 0) {
                     qemu_mutex_unlock(&p->mutex);
@@ -691,12 +686,12 @@ static void *multifd_send_thread(void *opaque)
             multifd_send_fill_packet(p);
             p->flags = 0;
             p->num_packets++;
-            p->total_normal_pages += p->normal_num;
+            p->total_normal_pages += num_pages;
             p->pages->num = 0;
             p->pages->block = NULL;
             qemu_mutex_unlock(&p->mutex);

-            trace_multifd_send(p->id, packet_num, p->normal_num, flags,
+            trace_multifd_send(p->id, packet_num, num_pages, flags,
                                p->next_packet_size);

             if (use_zero_copy_send) {
--
2.25.1



