
Re: [PATCH v4 08/15] spapr: nested: Introduce H_GUEST_CREATE_VCPU hcall.


From: Harsh Prateek Bora
Subject: Re: [PATCH v4 08/15] spapr: nested: Introduce H_GUEST_CREATE_VCPU hcall.
Date: Thu, 29 Feb 2024 15:09:36 +0530
User-agent: Mozilla Thunderbird



On 2/27/24 15:21, Nicholas Piggin wrote:
On Tue Feb 20, 2024 at 6:36 PM AEST, Harsh Prateek Bora wrote:
Introduce the nested PAPR hcall H_GUEST_CREATE_VCPU which is used to
create and initialize the specified VCPU resource for the previously
created guest. Each guest can have multiple VCPUs, up to a maximum of 2048.
All VCPUs for a guest get deallocated on guest delete.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
---
  include/hw/ppc/spapr.h        |  2 +
  include/hw/ppc/spapr_nested.h | 10 ++++
  hw/ppc/spapr_nested.c         | 96 +++++++++++++++++++++++++++++++++++
  3 files changed, 108 insertions(+)

diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index c4a79a1785..82b077bdd2 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -365,6 +365,7 @@ struct SpaprMachineState {
  #define H_UNSUPPORTED     -67
  #define H_OVERLAP         -68
  #define H_STATE           -75
+#define H_IN_USE          -77
  #define H_UNSUPPORTED_FLAG -256
  #define H_MULTI_THREADS_ACTIVE -9005
@@ -587,6 +588,7 @@ struct SpaprMachineState {
  #define H_GUEST_GET_CAPABILITIES 0x460
  #define H_GUEST_SET_CAPABILITIES 0x464
  #define H_GUEST_CREATE           0x470
+#define H_GUEST_CREATE_VCPU      0x474
  #define H_GUEST_DELETE           0x488
#define MAX_HCALL_OPCODE H_GUEST_DELETE
diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h
index f282479275..24e87bca08 100644
--- a/include/hw/ppc/spapr_nested.h
+++ b/include/hw/ppc/spapr_nested.h
@@ -14,6 +14,8 @@ typedef struct SpaprMachineStateNested {
typedef struct SpaprMachineStateNestedGuest {
      uint32_t pvr_logical;
+    unsigned long vcpus;
+    struct SpaprMachineStateNestedGuestVcpu *vcpu;
  } SpaprMachineStateNestedGuest;
/* Nested PAPR API related macros */
@@ -27,6 +29,7 @@ typedef struct SpaprMachineStateNestedGuest {
  #define H_GUEST_CAP_P10_MODE_BMAP     2
  #define PAPR_NESTED_GUEST_MAX         4096
  #define H_GUEST_DELETE_ALL_FLAG       0x8000000000000000ULL
+#define PAPR_NESTED_GUEST_VCPU_MAX    2048
/*
   * Register state for entering a nested guest with H_ENTER_NESTED.
@@ -118,8 +121,15 @@ struct nested_ppc_state {
      uint64_t ppr;
int64_t tb_offset;
+    /* Nested PAPR API */
+    uint64_t pvr;
  };
+typedef struct SpaprMachineStateNestedGuestVcpu {
+    bool enabled;
+    struct nested_ppc_state state;
+} SpaprMachineStateNestedGuestVcpu;
+
  void spapr_exit_nested(PowerPCCPU *cpu, int excp);
  typedef struct SpaprMachineState SpaprMachineState;
  bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
diff --git a/hw/ppc/spapr_nested.c b/hw/ppc/spapr_nested.c
index 09c4a35908..3cc704adda 100644
--- a/hw/ppc/spapr_nested.c
+++ b/hw/ppc/spapr_nested.c
@@ -428,6 +428,41 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
      }
  }
+static
+SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr,
+                                                     target_ulong guestid)
+{
+    SpaprMachineStateNestedGuest *guest;
+
+    guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
+    return guest;
+}
+
+static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest,
+                                    target_ulong vcpuid)
+{
+    struct SpaprMachineStateNestedGuestVcpu *vcpu;
+    /*
+     * Perform sanity checks for the provided vcpuid of a guest.
+     * For now, ensure it's valid, allocated and enabled for use.
+     */
+
+    if (vcpuid >= PAPR_NESTED_GUEST_VCPU_MAX) {
+        return false;
+    }
+
+    if (!(vcpuid < guest->vcpus)) {
+        return false;
+    }
+
+    vcpu = &guest->vcpu[vcpuid];
+    if (!vcpu->enabled) {
+        return false;
+    }
+
+    return true;
+}
+
  static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
                                               SpaprMachineState *spapr,
                                               target_ulong opcode,
@@ -518,6 +553,7 @@ static void
  destroy_guest_helper(gpointer value)
  {
      struct SpaprMachineStateNestedGuest *guest = value;
+    g_free(guest->vcpu);
      g_free(guest);
  }
@@ -613,6 +649,65 @@ static target_ulong h_guest_delete(PowerPCCPU *cpu,
      return H_SUCCESS;
  }
+static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu,
+                                        SpaprMachineState *spapr,
+                                        target_ulong opcode,
+                                        target_ulong *args)
+{
+    CPUPPCState *env = &cpu->env;
+    struct nested_ppc_state *l2_state;
+    target_ulong flags = args[0];
+    target_ulong guestid = args[1];
+    target_ulong vcpuid = args[2];
+    SpaprMachineStateNestedGuest *guest;
+
+    if (flags) { /* don't handle any flags for now */
+        return H_UNSUPPORTED_FLAG;
+    }
+
+    guest = spapr_get_nested_guest(spapr, guestid);
+    if (!guest) {
+        return H_P2;
+    }
+
+    if (vcpuid < guest->vcpus) {
+        return H_IN_USE;
+    }

Linear allocation isn't really a constraint of the API, right? I would
add an UNIMP log message to say what the problem is, otherwise a
hypervisor developer might struggle to understand the problem.


Sure, will add an UNIMP log message and move the assert below up to this point.
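
Something along these lines, maybe (untested sketch; the message wording and
the return code are placeholders, to be settled in the respin):

    /* keep the H_IN_USE check for ids already allocated, then: */
    if (vcpuid != guest->vcpus) {
        qemu_log_mask(LOG_UNIMP,
                      "%s: only linear vcpuid allocation is implemented, "
                      "vcpuid " TARGET_FMT_lu " requested but next free "
                      "id is %lu\n", __func__, vcpuid, guest->vcpus);
        return H_UNSUPPORTED; /* placeholder rc */
    }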

+
+    if (guest->vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) {
+        return H_P3;
+    }
+
+    if (guest->vcpus) {
+        SpaprMachineStateNestedGuestVcpu *vcpus;
+        vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu,
+                            guest->vcpu,
+                            guest->vcpus + 1);
+        if (!vcpus) {
+            return H_NO_MEM;
+        }
+        memset(&vcpus[guest->vcpus], 0,
+               sizeof(SpaprMachineStateNestedGuestVcpu));
+        guest->vcpu = vcpus;
+    } else {
+        guest->vcpu = g_try_new0(SpaprMachineStateNestedGuestVcpu, 1);
+        if (guest->vcpu == NULL) {
+            return H_NO_MEM;
+        }
+    }

g_try_renew works with NULL AFAIKS, so no need for the branch. I would
also create a local variable for the new nested guest vcpu created
here so you only have to index it once.


Ok, will update.
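
Something like this, then (untested sketch; curr_vcpu is a new local added for
the single-index suggestion, other names as in this version of the patch):

    SpaprMachineStateNestedGuestVcpu *curr_vcpu, *vcpus;

    /* g_try_renew() behaves like g_try_new() when guest->vcpu is still NULL */
    vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu,
                        guest->vcpu, guest->vcpus + 1);
    if (!vcpus) {
        return H_NO_MEM;
    }
    guest->vcpu = vcpus;
    curr_vcpu = &vcpus[guest->vcpus];
    memset(curr_vcpu, 0, sizeof(*curr_vcpu));
    guest->vcpus++;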

+    l2_state = &guest->vcpu[guest->vcpus].state;
+    guest->vcpus++;
+    assert(vcpuid < guest->vcpus); /* linear vcpuid allocation only */

The target can trigger this assert by passing a smaller vcpu id than one
already allocated? I would check above that vcpuid is exactly equal to
vcpus, and remove the assert from here.

Moving up as suggested.


+    /* Set L1 PVR as L2 default */
+    l2_state->pvr = env->spr[SPR_PVR];

Why is this here and not in H_GUEST_CREATE? I think you can use pcc->pvr
for this?

This is the vcpu-specific nested_ppc_state, which needs to be initialized on vcpu creation. We could use spapr->nested.pvr_base for this as well.
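
i.e. at vcpu create time it would be one of (sketch of the two options being
discussed):

    l2_state->pvr = env->spr[SPR_PVR];      /* as in this patch */

    l2_state->pvr = spapr->nested.pvr_base; /* alternative mentioned above */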

+    guest->vcpu[vcpuid].enabled = true;
+
+    if (!spapr_nested_vcpu_check(guest, vcpuid)) {
+        return H_PARAMETER;
+    }

This doesn't clean up on failure. Just remove "sanity" checks if they
are already checked in the same function. Any useful ones should be
properly handled or done before cleanup is needed.

We can actually remove this sanity check since it is already taken care of above, and move this helper to a later patch where it is used.


If you're respinning, could you rename vcpus to nr_vcpus, and then rename
vcpu (the array of vcpus) to vcpus.

Sure, will update.
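
i.e. the guest state would end up looking something like (sketch):

    typedef struct SpaprMachineStateNestedGuest {
        uint32_t pvr_logical;
        unsigned long nr_vcpus;
        struct SpaprMachineStateNestedGuestVcpu *vcpus;
    } SpaprMachineStateNestedGuest;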


Thanks,
Nick


