From: Nicholas Piggin
Subject: [PATCH v4 23/24] spapr: Fix vpa dispatch count for record-replay
Date: Tue, 12 Mar 2024 03:40:25 +1000

The dispatch count is a field in guest memory that the hypervisor
increments when preempting and dispatching the guest. It was not
being updated deterministically with respect to icount, because TCG
exec exit is not deterministic (e.g., an async event can cause it).

Change the VPA dispatch count increment to track whether the vCPU is
considered dispatched, and only treat it as preempted when the guest
calls cede / confer / join / stop-self / etc.

Cc: qemu-ppc@nongnu.org
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 include/hw/ppc/spapr_cpu_core.h |  3 +++
 hw/ppc/spapr.c                  | 36 ++-------------------------------
 hw/ppc/spapr_hcall.c            | 31 +++++++++++++++++++++++++++++++
 hw/ppc/spapr_rtas.c             |  1 +
 4 files changed, 37 insertions(+), 34 deletions(-)
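
(Editorial note, not part of the patch: a minimal C sketch of the counter
convention this change preserves. Per the code being removed from spapr.c
below, the hypervisor increments the counter once at dispatch and once at
preemption, so the counter is even while the vCPU is dispatched and odd
while it is preempted. The helper name here is illustrative only, not a
QEMU or PAPR identifier.)

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: how a guest can interpret the VPA dispatch counter. */
    static inline bool vpa_vcpu_is_dispatched(uint32_t dispatch_counter)
    {
        /* Even => currently dispatched; odd => preempted. */
        return (dispatch_counter & 1) == 0;
    }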

diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index 69a52e39b8..5feb0ecc2a 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -47,6 +47,7 @@ typedef struct SpaprCpuState {
     uint64_t vpa_addr;
     uint64_t slb_shadow_addr, slb_shadow_size;
     uint64_t dtl_addr, dtl_size;
+    bool dispatched; /* for vpa dispatch counter tracking */
     bool prod; /* not migrated, only used to improve dispatch latencies */
     struct ICPState *icp;
     struct XiveTCTX *tctx;
@@ -61,4 +62,6 @@ static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)
     return (SpaprCpuState *)cpu->machine_data;
 }
 
+void vpa_dispatch(CPUState *cs, SpaprCpuState *spapr_cpu, bool dispatch);
+
 #endif
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 55263f0815..28ce5f263a 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -4572,47 +4572,15 @@ static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
 {
     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
 
-    /* These are only called by TCG, KVM maintains dispatch state */
-
     spapr_cpu->prod = false;
-    if (spapr_cpu->vpa_addr) {
+    if (!spapr_cpu->dispatched) {
         CPUState *cs = CPU(cpu);
-        uint32_t dispatch;
-
-        dispatch = ldl_be_phys(cs->as,
-                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
-        dispatch++;
-        if ((dispatch & 1) != 0) {
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "VPA: incorrect dispatch counter value for "
-                          "dispatched partition %u, correcting.\n", dispatch);
-            dispatch++;
-        }
-        stl_be_phys(cs->as,
-                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
+        vpa_dispatch(cs, spapr_cpu, true);
     }
 }
 
 static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
 {
-    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
-
-    if (spapr_cpu->vpa_addr) {
-        CPUState *cs = CPU(cpu);
-        uint32_t dispatch;
-
-        dispatch = ldl_be_phys(cs->as,
-                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
-        dispatch++;
-        if ((dispatch & 1) != 1) {
-            qemu_log_mask(LOG_GUEST_ERROR,
-                          "VPA: incorrect dispatch counter value for "
-                          "preempted partition %u, correcting.\n", dispatch);
-            dispatch++;
-        }
-        stl_be_phys(cs->as,
-                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
-    }
 }
 
 static void spapr_machine_class_init(ObjectClass *oc, void *data)
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 75c2d12978..5f68c02739 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -487,6 +487,34 @@ static target_ulong h_register_vpa(PowerPCCPU *cpu, SpaprMachineState *spapr,
     return ret;
 }
 
+void vpa_dispatch(CPUState *cs, SpaprCpuState *spapr_cpu, bool dispatch)
+{
+    uint32_t counter;
+
+    if (!dispatch) {
+        assert(spapr_cpu->dispatched);
+    } else {
+        assert(!spapr_cpu->dispatched);
+    }
+    spapr_cpu->dispatched = dispatch;
+
+    if (!spapr_cpu->vpa_addr) {
+        return;
+    }
+
+    /* This is only called by TCG; KVM maintains dispatch state */
+    counter = ldl_be_phys(cs->as, spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
+    counter++;
+    if ((counter & 1) != !dispatch) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "VPA: incorrect dispatch counter value for "
+                      "%s partition %u, correcting.\n",
+                      dispatch ? "dispatched" : "preempted", counter);
+        counter++;
+    }
+    stl_be_phys(cs->as, spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, counter);
+}
+
 static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
 {
@@ -505,6 +533,7 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
 
     if (!cpu_has_work(cs)) {
         cs->halted = 1;
+        vpa_dispatch(cs, spapr_cpu, false);
         cs->exception_index = EXCP_HLT;
         cs->exit_request = 1;
         ppc_maybe_interrupt(env);
@@ -531,6 +560,8 @@ static target_ulong h_confer_self(PowerPCCPU *cpu)
     cs->exit_request = 1;
     ppc_maybe_interrupt(&cpu->env);
 
+    vpa_dispatch(cs, spapr_cpu, false);
+
     return H_SUCCESS;
 }
 
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index f329693c55..8ce4230223 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -216,6 +216,7 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr,
      */
     env->spr[SPR_PSSCR] |= PSSCR_EC;
     cs->halted = 1;
+    vpa_dispatch(cs, spapr_cpu_state(cpu), false);
     ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm);
     kvmppc_set_reg_ppc_online(cpu, 0);
     qemu_cpu_kick(cs);
-- 
2.42.0



