qemu-ppc
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v3 8/8] target/ppc: Add SMT support to time facilities


From: Cédric Le Goater
Subject: Re: [PATCH v3 8/8] target/ppc: Add SMT support to time facilities
Date: Tue, 23 Jan 2024 08:22:14 +0100
User-agent: Mozilla Thunderbird

On 12/1/23 13:16, Nicholas Piggin wrote:
The TB, VTB, PURR, HDEC SPRs are per-LPAR registers, and the TFMR is a
per-core register. Add the necessary SMT synchronisation and value
sharing.

The TFMR can only drive the timebase state machine via thread 0 of the
core, which is almost certainly not right, but it is enough for skiboot
and certain other proprietary firmware.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>


Acked-by: Cédric Le Goater <clg@kaod.org>

Thanks,

C.


---
  target/ppc/timebase_helper.c | 105 ++++++++++++++++++++++++++++++++---
  target/ppc/translate.c       |  42 +++++++++++++-
  2 files changed, 136 insertions(+), 11 deletions(-)

diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
index bc1d54a427..a23fbf75ff 100644
--- a/target/ppc/timebase_helper.c
+++ b/target/ppc/timebase_helper.c
@@ -60,19 +60,55 @@ target_ulong helper_load_purr(CPUPPCState *env)
void helper_store_purr(CPUPPCState *env, target_ulong val)
  {
-    cpu_ppc_store_purr(env, val);
+    CPUState *cs = env_cpu(env);
+    CPUState *ccs;
+    uint32_t nr_threads = cs->nr_threads;
+
+    if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+        cpu_ppc_store_purr(env, val);
+        return;
+    }
+
+    THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cpu_ppc_store_purr(cenv, val);
+    }
  }
  #endif
#if !defined(CONFIG_USER_ONLY)
  void helper_store_tbl(CPUPPCState *env, target_ulong val)
  {
-    cpu_ppc_store_tbl(env, val);
+    CPUState *cs = env_cpu(env);
+    CPUState *ccs;
+    uint32_t nr_threads = cs->nr_threads;
+
+    if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+        cpu_ppc_store_tbl(env, val);
+        return;
+    }
+
+    THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cpu_ppc_store_tbl(cenv, val);
+    }
  }
void helper_store_tbu(CPUPPCState *env, target_ulong val)
  {
-    cpu_ppc_store_tbu(env, val);
+    CPUState *cs = env_cpu(env);
+    CPUState *ccs;
+    uint32_t nr_threads = cs->nr_threads;
+
+    if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+        cpu_ppc_store_tbu(env, val);
+        return;
+    }
+
+    THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cpu_ppc_store_tbu(cenv, val);
+    }
  }
void helper_store_atbl(CPUPPCState *env, target_ulong val)
@@ -102,17 +138,53 @@ target_ulong helper_load_hdecr(CPUPPCState *env)
void helper_store_hdecr(CPUPPCState *env, target_ulong val)
  {
-    cpu_ppc_store_hdecr(env, val);
+    CPUState *cs = env_cpu(env);
+    CPUState *ccs;
+    uint32_t nr_threads = cs->nr_threads;
+
+    if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+        cpu_ppc_store_hdecr(env, val);
+        return;
+    }
+
+    THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cpu_ppc_store_hdecr(cenv, val);
+    }
  }
void helper_store_vtb(CPUPPCState *env, target_ulong val)
  {
-    cpu_ppc_store_vtb(env, val);
+    CPUState *cs = env_cpu(env);
+    CPUState *ccs;
+    uint32_t nr_threads = cs->nr_threads;
+
+    if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+        cpu_ppc_store_vtb(env, val);
+        return;
+    }
+
+    THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cpu_ppc_store_vtb(cenv, val);
+    }
  }
void helper_store_tbu40(CPUPPCState *env, target_ulong val)
  {
-    cpu_ppc_store_tbu40(env, val);
+    CPUState *cs = env_cpu(env);
+    CPUState *ccs;
+    uint32_t nr_threads = cs->nr_threads;
+
+    if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+        cpu_ppc_store_tbu40(env, val);
+        return;
+    }
+
+    THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cpu_ppc_store_tbu40(cenv, val);
+    }
  }
target_ulong helper_load_40x_pit(CPUPPCState *env)
@@ -211,6 +283,21 @@ static uint64_t tfmr_new_tb_state(uint64_t tfmr, unsigned int tbst)
      return tfmr;
  }
+static void write_tfmr(CPUPPCState *env, target_ulong val)
+{
+    CPUState *cs = env_cpu(env);
+
+    if (cs->nr_threads == 1) {
+        env->spr[SPR_TFMR] = val;
+    } else {
+        CPUState *ccs;
+        THREAD_SIBLING_FOREACH(cs, ccs) {
+            CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+            cenv->spr[SPR_TFMR] = val;
+        }
+    }
+}
+
  static void tb_state_machine_step(CPUPPCState *env)
  {
      uint64_t tfmr = env->spr[SPR_TFMR];
@@ -224,7 +311,7 @@ static void tb_state_machine_step(CPUPPCState *env)
          env->pnv_tod_tbst.tb_sync_pulse_timer--;
      } else {
          tfmr |= TFMR_TB_SYNC_OCCURED;
-        env->spr[SPR_TFMR] = tfmr;
+        write_tfmr(env, tfmr);
      }
if (env->pnv_tod_tbst.tb_state_timer) {
@@ -262,7 +349,7 @@ static void tb_state_machine_step(CPUPPCState *env)
          }
      }
- env->spr[SPR_TFMR] = tfmr;
+    write_tfmr(env, tfmr);
  }
target_ulong helper_load_tfmr(CPUPPCState *env)
@@ -357,7 +444,7 @@ void helper_store_tfmr(CPUPPCState *env, target_ulong val)
      }
out:
-    env->spr[SPR_TFMR] = tfmr;
+    write_tfmr(env, tfmr);
  }
  #endif
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 329da4d518..bd103b1026 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -247,13 +247,24 @@ static inline bool gen_serialize(DisasContext *ctx)
      return true;
  }
-#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+static inline bool gen_serialize_core(DisasContext *ctx)
+{
+    if (ctx->flags & POWERPC_FLAG_SMT) {
+        return gen_serialize(ctx);
+    }
+    return true;
+}
+#endif
+
  static inline bool gen_serialize_core_lpar(DisasContext *ctx)
  {
+#if defined(TARGET_PPC64)
      if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
          return gen_serialize(ctx);
      }
-
+#endif
      return true;
  }
  #endif
@@ -667,12 +678,20 @@ void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
  #if !defined(CONFIG_USER_ONLY)
  void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
  {
+    if (!gen_serialize_core_lpar(ctx)) {
+        return;
+    }
+
      translator_io_start(&ctx->base);
      gen_helper_store_tbl(tcg_env, cpu_gpr[gprn]);
  }
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
  {
+    if (!gen_serialize_core_lpar(ctx)) {
+        return;
+    }
+
      translator_io_start(&ctx->base);
      gen_helper_store_tbu(tcg_env, cpu_gpr[gprn]);
  }
@@ -696,6 +715,9 @@ void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
  {
+    if (!gen_serialize_core_lpar(ctx)) {
+        return;
+    }
      translator_io_start(&ctx->base);
      gen_helper_store_purr(tcg_env, cpu_gpr[gprn]);
  }
@@ -709,6 +731,9 @@ void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
  {
+    if (!gen_serialize_core_lpar(ctx)) {
+        return;
+    }
      translator_io_start(&ctx->base);
      gen_helper_store_hdecr(tcg_env, cpu_gpr[gprn]);
  }
@@ -721,12 +746,18 @@ void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
  {
+    if (!gen_serialize_core_lpar(ctx)) {
+        return;
+    }
      translator_io_start(&ctx->base);
      gen_helper_store_vtb(tcg_env, cpu_gpr[gprn]);
  }
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
  {
+    if (!gen_serialize_core_lpar(ctx)) {
+        return;
+    }
      translator_io_start(&ctx->base);
      gen_helper_store_tbu40(tcg_env, cpu_gpr[gprn]);
  }
@@ -1220,11 +1251,18 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
  {
+    /* Reading TFMR can cause it to be updated, so serialize threads here too */
+    if (!gen_serialize_core(ctx)) {
+        return;
+    }
      gen_helper_load_tfmr(cpu_gpr[gprn], tcg_env);
  }
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
  {
+    if (!gen_serialize_core(ctx)) {
+        return;
+    }
      gen_helper_store_tfmr(tcg_env, cpu_gpr[gprn]);
  }




reply via email to

[Prev in Thread] Current Thread [Next in Thread]