From: Juan Quintela
Subject: [PATCH v2 10/16] migration: Don't abuse qemu_file transferred for RDMA
Date: Mon, 15 May 2023 21:57:03 +0200

Just create a variable for it, the same way that multifd does.  This
way it is safe to use from other threads, etc.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/migration-stats.h |  4 ++++
 migration/migration-stats.c |  5 +++--
 migration/rdma.c            | 22 ++++++++++++++++++++--
 migration/trace-events      |  2 +-
 4 files changed, 28 insertions(+), 5 deletions(-)
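
For readers unfamiliar with the Stat64 counters: the thread-safety claim
above rests on the counter being updated and read atomically, so the RDMA
write path and whichever thread computes the migration totals never need to
share the QEMUFile state.  Below is a minimal standalone sketch of that
pattern using C11 atomics; the names rdma_account_bytes and
transferred_bytes are illustrative stand-ins, not the QEMU API.

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the mig_stats counters (not the QEMU types). */
    static _Atomic uint64_t rdma_bytes;
    static _Atomic uint64_t multifd_bytes;

    /* Called from the RDMA write path for every chunk posted, the way the
     * patch calls stat64_add(&mig_stats.rdma_bytes, sge.length). */
    static void rdma_account_bytes(uint64_t len)
    {
        atomic_fetch_add_explicit(&rdma_bytes, len, memory_order_relaxed);
    }

    /* Any thread can read a consistent total without holding a lock,
     * mirroring migration_transferred_bytes() after this patch. */
    static uint64_t transferred_bytes(uint64_t qemu_file_bytes)
    {
        uint64_t rdma = atomic_load_explicit(&rdma_bytes, memory_order_relaxed);
        uint64_t multifd = atomic_load_explicit(&multifd_bytes, memory_order_relaxed);

        return qemu_file_bytes + multifd + rdma;
    }

    int main(void)
    {
        rdma_account_bytes(4096);          /* e.g. one 4 KiB chunk */
        printf("%" PRIu64 " bytes\n", transferred_bytes(0));
        return 0;
    }
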

diff --git a/migration/migration-stats.h b/migration/migration-stats.h
index 9568b5b473..2e3e894307 100644
--- a/migration/migration-stats.h
+++ b/migration/migration-stats.h
@@ -89,6 +89,10 @@ typedef struct {
      * Maximum amount of data we can send in a cycle.
      */
     Stat64 rate_limit_max;
+    /*
+     * Number of bytes sent through RDMA.
+     */
+    Stat64 rdma_bytes;
     /*
      * How long has the setup stage took.
      */
diff --git a/migration/migration-stats.c b/migration/migration-stats.c
index abf2d38b18..4d8e9f93b7 100644
--- a/migration/migration-stats.c
+++ b/migration/migration-stats.c
@@ -68,8 +68,9 @@ void migration_rate_reset(QEMUFile *f)
 uint64_t migration_transferred_bytes(QEMUFile *f)
 {
     uint64_t multifd = stat64_get(&mig_stats.multifd_bytes);
+    uint64_t rdma = stat64_get(&mig_stats.rdma_bytes);
     uint64_t qemu_file = qemu_file_transferred(f);
 
-    trace_migration_transferred_bytes(qemu_file, multifd);
-    return qemu_file + multifd;
+    trace_migration_transferred_bytes(qemu_file, multifd, rdma);
+    return qemu_file + multifd + rdma;
 }
diff --git a/migration/rdma.c b/migration/rdma.c
index 2e4dcff1c9..074456f9df 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -2122,9 +2122,18 @@ retry:
                     return -EIO;
                 }
 
+                /*
+                 * TODO: Here we are sending something, but we are not
+                 * accounting for anything transferred.  The following is wrong:
+                 *
+                 * stat64_add(&mig_stats.rdma_bytes, sge.length);
+                 *
+                 * because we are using some kind of compression.  I
+                 * would think that head.len would be the closest
+                 * thing to a correct value.
+                 */
                 stat64_add(&mig_stats.zero_pages,
                            sge.length / qemu_target_page_size());
-
                 return 1;
             }
 
@@ -2232,8 +2241,17 @@ retry:
 
     set_bit(chunk, block->transit_bitmap);
     stat64_add(&mig_stats.normal_pages, sge.length / qemu_target_page_size());
+    /*
+     * We are adding the amount of data written to transferred, but no
+     * overhead at all.  I will assume that RDMA is magical and doesn't
+     * need to transfer (at least) the addresses where it wants to
+     * write the pages.  Here it looks like it should be something
+     * like:
+     *     sizeof(send_wr) + sge.length
+     * but this being RDMA, who knows.
+     */
+    stat64_add(&mig_stats.rdma_bytes, sge.length);
     ram_transferred_add(sge.length);
-    qemu_file_credit_transfer(f, sge.length);
     rdma->total_writes++;
 
     return 0;
diff --git a/migration/trace-events b/migration/trace-events
index cdaef7a1ea..54ae5653fd 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -187,7 +187,7 @@ process_incoming_migration_co_postcopy_end_main(void) ""
 postcopy_preempt_enabled(bool value) "%d"
 
 # migration-stats
-migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd) "qemu_file %" PRIu64 " multifd %" PRIu64
+migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd, uint64_t rdma) "qemu_file %" PRIu64 " multifd %" PRIu64 " RDMA %" PRIu64
 
 # channel.c
migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s"
-- 
2.40.1