[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-ppc] [RFC PATCH v2 4/5] xics: Use stable_cpu_id instead of cpu
From: |
David Gibson |
Subject: |
Re: [Qemu-ppc] [RFC PATCH v2 4/5] xics: Use stable_cpu_id instead of cpu_index in XICS code |
Date: |
Fri, 8 Jul 2016 15:32:56 +1000 |
User-agent: |
Mutt/1.6.1 (2016-04-27) |
On Thu, Jul 07, 2016 at 08:20:24PM +0530, Bharata B Rao wrote:
> xics maintains an array of ICPState structures which is indexed
> by cpu_index. Optionally change this to index the ICPState array by
> stable_cpu_id. When the use of stable_cpu_id is enabled from pseries-2.7
> onwards, this allows migration of guest to succeed when there are holes in
> cpu_index range due to CPU core hot removal.
You haven't changed the allocation path, so this will waste a bunch of
space if the stable IDs aren't dense. They always are for now, but
that might not be true for the upcoming powernv machine type.
> Signed-off-by: Bharata B Rao <address@hidden>
> ---
> hw/intc/xics.c | 21 +++++++++++++++++----
> hw/intc/xics_kvm.c | 10 ++++------
> hw/intc/xics_spapr.c | 29 +++++++++++++++++------------
> include/hw/ppc/xics.h | 1 +
> 4 files changed, 39 insertions(+), 22 deletions(-)
>
> diff --git a/hw/intc/xics.c b/hw/intc/xics.c
> index cd48f42..97ff3c5 100644
> --- a/hw/intc/xics.c
> +++ b/hw/intc/xics.c
> @@ -36,6 +36,17 @@
> #include "qemu/error-report.h"
> #include "qapi/visitor.h"
>
> +int xics_get_server(PowerPCCPU *cpu)
> +{
> + CPUState *cs = CPU(cpu);
> +
> + if (cs->has_stable_cpu_id) {
> + return cs->stable_cpu_id;
> + } else {
> + return cs->cpu_index;
> + }
> +}
I really think we want a generic helper that gets our best guess at a
stable id - the actual stable id if it's present, otherwise
cpu_index. I think a bunch of things are going to want this.
> +
> int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
> {
> PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);
> @@ -50,9 +61,10 @@ int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
> void xics_cpu_destroy(XICSState *xics, PowerPCCPU *cpu)
> {
> CPUState *cs = CPU(cpu);
> - ICPState *ss = &xics->ss[cs->cpu_index];
> + int server = xics_get_server(cpu);
> + ICPState *ss = &xics->ss[server];
>
> - assert(cs->cpu_index < xics->nr_servers);
> + assert(server < xics->nr_servers);
> assert(cs == ss->cs);
>
> ss->output = NULL;
> @@ -63,10 +75,11 @@ void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
> {
> CPUState *cs = CPU(cpu);
> CPUPPCState *env = &cpu->env;
> - ICPState *ss = &xics->ss[cs->cpu_index];
> + int server = xics_get_server(cpu);
> + ICPState *ss = &xics->ss[server];
> XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
>
> - assert(cs->cpu_index < xics->nr_servers);
> + assert(server < xics->nr_servers);
>
> ss->cs = cs;
>
> diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
> index edbd62f..f71b468 100644
> --- a/hw/intc/xics_kvm.c
> +++ b/hw/intc/xics_kvm.c
> @@ -326,14 +326,12 @@ static const TypeInfo ics_kvm_info = {
> */
> static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
> {
> - CPUState *cs;
> - ICPState *ss;
> + CPUState *cs = CPU(cpu);
> KVMXICSState *xicskvm = XICS_SPAPR_KVM(xics);
> + int server = xics_get_server(cpu);
> + ICPState *ss = &xics->ss[server];
>
> - cs = CPU(cpu);
> - ss = &xics->ss[cs->cpu_index];
> -
> - assert(cs->cpu_index < xics->nr_servers);
> + assert(server < xics->nr_servers);
> if (xicskvm->kernel_xics_fd == -1) {
> abort();
> }
> diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
> index 618826d..5491f82 100644
> --- a/hw/intc/xics_spapr.c
> +++ b/hw/intc/xics_spapr.c
> @@ -31,6 +31,7 @@
> #include "trace.h"
> #include "qemu/timer.h"
> #include "hw/ppc/spapr.h"
> +#include "hw/ppc/spapr_cpu_core.h"
> #include "hw/ppc/xics.h"
> #include "qapi/visitor.h"
> #include "qapi/error.h"
> @@ -42,17 +43,19 @@
> static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> - CPUState *cs = CPU(cpu);
> + int server = xics_get_server(cpu);
> target_ulong cppr = args[0];
>
> - icp_set_cppr(spapr->xics, cs->cpu_index, cppr);
> + icp_set_cppr(spapr->xics, server, cppr);
> return H_SUCCESS;
> }
>
> static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> - target_ulong server = xics_get_cpu_index_by_dt_id(args[0]);
> + CPUState *cs = CPU(cpu);
> + target_ulong server = cs->has_stable_cpu_id ? args[0] :
> + xics_get_cpu_index_by_dt_id(args[0]);
> target_ulong mfrr = args[1];
>
> if (server >= spapr->xics->nr_servers) {
> @@ -66,8 +69,8 @@ static target_ulong h_ipi(PowerPCCPU *cpu,
> sPAPRMachineState *spapr,
> static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> - CPUState *cs = CPU(cpu);
> - uint32_t xirr = icp_accept(spapr->xics->ss + cs->cpu_index);
> + int server = xics_get_server(cpu);
> + uint32_t xirr = icp_accept(spapr->xics->ss + server);
>
> args[0] = xirr;
> return H_SUCCESS;
> @@ -76,8 +79,8 @@ static target_ulong h_xirr(PowerPCCPU *cpu,
> sPAPRMachineState *spapr,
> static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> - CPUState *cs = CPU(cpu);
> - ICPState *ss = &spapr->xics->ss[cs->cpu_index];
> + int server = xics_get_server(cpu);
> + ICPState *ss = &spapr->xics->ss[server];
> uint32_t xirr = icp_accept(ss);
>
> args[0] = xirr;
> @@ -88,19 +91,19 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu,
> sPAPRMachineState *spapr,
> static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> - CPUState *cs = CPU(cpu);
> + int server = xics_get_server(cpu);
> target_ulong xirr = args[0];
>
> - icp_eoi(spapr->xics, cs->cpu_index, xirr);
> + icp_eoi(spapr->xics, server, xirr);
> return H_SUCCESS;
> }
>
> static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
> target_ulong opcode, target_ulong *args)
> {
> - CPUState *cs = CPU(cpu);
> + int server = xics_get_server(cpu);
> uint32_t mfrr;
> - uint32_t xirr = icp_ipoll(spapr->xics->ss + cs->cpu_index, &mfrr);
> + uint32_t xirr = icp_ipoll(spapr->xics->ss + server, &mfrr);
>
> args[0] = xirr;
> args[1] = mfrr;
> @@ -113,6 +116,7 @@ static void rtas_set_xive(PowerPCCPU *cpu,
> sPAPRMachineState *spapr,
> uint32_t nargs, target_ulong args,
> uint32_t nret, target_ulong rets)
> {
> + CPUState *cs = CPU(cpu);
> ICSState *ics = spapr->xics->ics;
> uint32_t nr, server, priority;
>
> @@ -122,7 +126,8 @@ static void rtas_set_xive(PowerPCCPU *cpu,
> sPAPRMachineState *spapr,
> }
>
> nr = rtas_ld(args, 0);
> - server = xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
> + server = cs->has_stable_cpu_id ? rtas_ld(args, 1) :
> + xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
> priority = rtas_ld(args, 2);
>
> if (!ics_valid_irq(ics, nr) || (server >= ics->xics->nr_servers)
> diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h
> index 6189a3b..aea0678 100644
> --- a/include/hw/ppc/xics.h
> +++ b/include/hw/ppc/xics.h
> @@ -195,5 +195,6 @@ void ics_write_xive(ICSState *ics, int nr, int server,
> void ics_set_irq_type(ICSState *ics, int srcno, bool lsi);
>
> int xics_find_source(XICSState *icp, int irq);
> +int xics_get_server(PowerPCCPU *cpu);
>
> #endif /* __XICS_H__ */
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson
signature.asc
Description: PGP signature
- Re: [Qemu-ppc] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, (continued)
- Re: [Qemu-ppc] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, David Gibson, 2016/07/08
- Re: [Qemu-ppc] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, Bharata B Rao, 2016/07/08
- Re: [Qemu-ppc] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, David Gibson, 2016/07/08
- Re: [Qemu-ppc] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, Igor Mammedov, 2016/07/08
- Re: [Qemu-ppc] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, Bharata B Rao, 2016/07/10
- Re: [Qemu-ppc] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, David Gibson, 2016/07/10
- Re: [Qemu-ppc] [Qemu-devel] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, Igor Mammedov, 2016/07/11
- Re: [Qemu-ppc] [Qemu-devel] [RFC PATCH v2 3/5] spapr: Set stable_cpu_id for threads of CPU cores, David Gibson, 2016/07/12
[Qemu-ppc] [RFC PATCH v2 5/5] spapr: Enable the use of stable_cpu_id from pseries-2.7 onwards, Bharata B Rao, 2016/07/07
[Qemu-ppc] [RFC PATCH v2 4/5] xics: Use stable_cpu_id instead of cpu_index in XICS code, Bharata B Rao, 2016/07/07
- Re: [Qemu-ppc] [RFC PATCH v2 4/5] xics: Use stable_cpu_id instead of cpu_index in XICS code,
David Gibson <=
Re: [Qemu-ppc] [RFC PATCH v2 0/5] sPAPR: Fix migration when CPUs are removed in random order, Greg Kurz, 2016/07/07