Re: [PATCH v8 4/7] spapr_numa.c: rename numa_assoc_array to FORM1_assoc_array


From: Daniel Henrique Barboza
Subject: Re: [PATCH v8 4/7] spapr_numa.c: rename numa_assoc_array to FORM1_assoc_array
Date: Mon, 20 Sep 2021 10:39:19 -0300
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Thunderbird/91.1.0



On 9/20/21 06:21, Greg Kurz wrote:
On Fri, 17 Sep 2021 18:27:59 -0300
Daniel Henrique Barboza <danielhb413@gmail.com> wrote:

Introducing a new NUMA affinity, FORM2, requires a new mechanism to
switch between affinity modes after CAS. Also, we want FORM2 data
structures and functions to be completely separated from the existing
FORM1 code, allowing us to avoid adding new code that inherits the
existing complexity of FORM1.

The idea of switching values used by the write_dt() functions in
spapr_numa.c was already introduced in the previous patch, and
the same approach will be used when dealing with the FORM1 and FORM2
arrays.

We can accomplish that by renaming the existing numa_assoc_array
to FORM1_assoc_array, which is now used exclusively to handle FORM1 affinity
data. A new helper get_associativity() is then introduced to be used by the
write_dt() functions to retrieve the current ibm,associativity array of
a given node, after considering affinity selection that might have been
done during CAS. All code that was using numa_assoc_array now needs to
retrieve the array by calling this function.

This will allow for an easier plug of FORM2 data later on.

Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---

This looks good. Just one suggestion, see below.

  hw/ppc/spapr_hcall.c   |  1 +
  hw/ppc/spapr_numa.c    | 38 +++++++++++++++++++++++++-------------
  include/hw/ppc/spapr.h |  2 +-
  3 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 0e9a5b2e40..9056644890 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -17,6 +17,7 @@
  #include "kvm_ppc.h"
  #include "hw/ppc/fdt.h"
  #include "hw/ppc/spapr_ovec.h"
+#include "hw/ppc/spapr_numa.h"
  #include "mmu-book3s-v3.h"
  #include "hw/mem/memory-device.h"
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 08e2d6aed8..7339d00d20 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -46,6 +46,15 @@ static int get_vcpu_assoc_size(SpaprMachineState *spapr)
      return get_numa_assoc_size(spapr) + 1;
  }
+/*
+ * Retrieves the ibm,associativity array of NUMA node 'node_id'
+ * for the current NUMA affinity.
+ */
+static uint32_t *get_associativity(SpaprMachineState *spapr, int node_id)
+{
+    return spapr->FORM1_assoc_array[node_id];
+}

All users of this helper only need to read the content of the
associativity array. And since these arrays are static, the
returned pointer should certainly not be passed to g_free()
for example. This wouldn't be detected by compilers though,
unless you have the helper to return a pointer to const
data. So I suggest you just do that for extra safety.
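Something like this (untested, just to illustrate the idea):

/* same helper as in the patch above, only returning a pointer to const */
static const uint32_t *get_associativity(SpaprMachineState *spapr, int node_id)
{
    return spapr->FORM1_assoc_array[node_id];
}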

Good point. I'll be more vigilant about the use of const pointers in these
cases. The extra safety is always welcome.
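
The callers should only need their local pointer to become const as well,
e.g. in spapr_numa_write_associativity_dt() (just a sketch of what I have
in mind for the next version, untested):

    const uint32_t *associativity = get_associativity(spapr, nodeid);

    _FDT((fdt_setprop(fdt, offset, "ibm,associativity",
                      associativity,
                      get_numa_assoc_size(spapr) * sizeof(uint32_t))));

fdt_setprop() and memcpy() already take the data as const void *, so
nothing else changes.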


Daniel


+
  static bool spapr_numa_is_symmetrical(MachineState *ms)
  {
      int src, dst;
@@ -124,7 +133,7 @@ static void spapr_numa_define_FORM1_domains(SpaprMachineState *spapr)
       */
      for (i = 1; i < nb_numa_nodes; i++) {
          for (j = 1; j < FORM1_DIST_REF_POINTS; j++) {
-            spapr->numa_assoc_array[i][j] = cpu_to_be32(i);
+            spapr->FORM1_assoc_array[i][j] = cpu_to_be32(i);
          }
      }
@@ -176,8 +185,8 @@ static void spapr_numa_define_FORM1_domains(SpaprMachineState *spapr)
               * and going up to 0x1.
               */
              for (i = n_level; i > 0; i--) {
-                assoc_src = spapr->numa_assoc_array[src][i];
-                spapr->numa_assoc_array[dst][i] = assoc_src;
+                assoc_src = spapr->FORM1_assoc_array[src][i];
+                spapr->FORM1_assoc_array[dst][i] = assoc_src;
              }
          }
      }
@@ -204,8 +213,8 @@ static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr,
       * 'i' will be a valid node_id set by the user.
       */
      for (i = 0; i < nb_numa_nodes; i++) {
-        spapr->numa_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS);
-        spapr->numa_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i);
+        spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS);
+        spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i);
      }
    /*
@@ -219,15 +228,15 @@ static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr,
      max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;
    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
-        spapr->numa_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS);
+        spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS);
        for (j = 1; j < FORM1_DIST_REF_POINTS; j++) {
              uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ?
                                   SPAPR_GPU_NUMA_ID : cpu_to_be32(i);
-            spapr->numa_assoc_array[i][j] = gpu_assoc;
+            spapr->FORM1_assoc_array[i][j] = gpu_assoc;
          }
-        spapr->numa_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i);
+        spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i);
      }
    /*
@@ -259,8 +268,10 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr,
  void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
                                         int offset, int nodeid)
  {
+    uint32_t *associativity = get_associativity(spapr, nodeid);
+
      _FDT((fdt_setprop(fdt, offset, "ibm,associativity",
-                      spapr->numa_assoc_array[nodeid],
+                      associativity,
                        get_numa_assoc_size(spapr) * sizeof(uint32_t))));
  }
@@ -270,6 +281,7 @@ static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
      int max_distance_ref_points = get_max_dist_ref_points(spapr);
      int vcpu_assoc_size = get_vcpu_assoc_size(spapr);
      uint32_t *vcpu_assoc = g_new(uint32_t, vcpu_assoc_size);
+    uint32_t *associativity = get_associativity(spapr, cpu->node_id);
      int index = spapr_get_vcpu_id(cpu);
    /*
@@ -280,7 +292,7 @@ static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
       */
      vcpu_assoc[0] = cpu_to_be32(max_distance_ref_points + 1);
      vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
-    memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
+    memcpy(vcpu_assoc + 1, associativity + 1,
             (vcpu_assoc_size - 2) * sizeof(uint32_t));
    return vcpu_assoc;
@@ -319,10 +331,10 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
      cur_index += 2;
      for (i = 0; i < nr_nodes; i++) {
          /*
-         * For the lookup-array we use the ibm,associativity array,
-         * from numa_assoc_array. without the first element (size).
+         * For the lookup-array we use the ibm,associativity array of the
+         * current NUMA affinity, without the first element (size).
           */
-        uint32_t *associativity = spapr->numa_assoc_array[i];
+        uint32_t *associativity = get_associativity(spapr, i);
          memcpy(cur_index, ++associativity,
                 sizeof(uint32_t) * max_distance_ref_points);
          cur_index += max_distance_ref_points;
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 814e087e98..6b3dfc5dc2 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -249,7 +249,7 @@ struct SpaprMachineState {
      unsigned gpu_numa_id;
      SpaprTpmProxy *tpm_proxy;
-    uint32_t numa_assoc_array[NUMA_NODES_MAX_NUM][FORM1_NUMA_ASSOC_SIZE];
+    uint32_t FORM1_assoc_array[NUMA_NODES_MAX_NUM][FORM1_NUMA_ASSOC_SIZE];
    Error *fwnmi_migration_blocker;
  };



