From: Philippe Mathieu-Daudé
Subject: Re: [PATCH v4 1/3] memory: Extract mtree_info_flatview() from mtree_info()
Date: Wed, 1 Sep 2021 18:23:17 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Thunderbird/78.11.0

On 9/1/21 6:19 PM, Philippe Mathieu-Daudé wrote:
> While mtree_info() handles both the AS and flatview cases, the
> two cases share basically no code. Split mtree_info_flatview()
> out of mtree_info() to simplify it.
> 
> Note: This patch is easier to review using 'git diff --color-moved=blocks'.

Surprisingly, git-format-patch chose a better algorithm automatically...
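
For reviewers, a minimal way to reproduce the move-aware view locally
(assuming the commit is checked out) is:

  git show --color-moved=blocks -- softmmu/memory.c

and the diff algorithm itself can be influenced with e.g.
'git config diff.algorithm patience' before re-running git-format-patch.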

> Suggested-by: Peter Maydell <peter.maydell@linaro.org>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
>  softmmu/memory.c | 72 ++++++++++++++++++++++++++----------------------
>  1 file changed, 39 insertions(+), 33 deletions(-)
> 
> diff --git a/softmmu/memory.c b/softmmu/memory.c
> index bfedaf9c4df..3eb6f52de67 100644
> --- a/softmmu/memory.c
> +++ b/softmmu/memory.c
> @@ -3246,6 +3246,44 @@ static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
>      return true;
>  }
>  
> +static void mtree_info_flatview(bool dispatch_tree, bool owner)
> +{
> +    struct FlatViewInfo fvi = {
> +        .counter = 0,
> +        .dispatch_tree = dispatch_tree,
> +        .owner = owner,
> +    };
> +    AddressSpace *as;
> +    FlatView *view;
> +    GArray *fv_address_spaces;
> +    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
> +    AccelClass *ac = ACCEL_GET_CLASS(current_accel());
> +
> +    if (ac->has_memory) {
> +        fvi.ac = ac;
> +    }
> +
> +    /* Gather all FVs in one table */
> +    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
> +        view = address_space_get_flatview(as);
> +
> +        fv_address_spaces = g_hash_table_lookup(views, view);
> +        if (!fv_address_spaces) {
> +            fv_address_spaces = g_array_new(false, false, sizeof(as));
> +            g_hash_table_insert(views, view, fv_address_spaces);
> +        }
> +
> +        g_array_append_val(fv_address_spaces, as);
> +    }
> +
> +    /* Print */
> +    g_hash_table_foreach(views, mtree_print_flatview, &fvi);
> +
> +    /* Free */
> +    g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
> +    g_hash_table_unref(views);
> +}
> +
>  void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
>  {
>      MemoryRegionListHead ml_head;
> @@ -3253,39 +3291,7 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
>      AddressSpace *as;
>  
>      if (flatview) {
> -        FlatView *view;
> -        struct FlatViewInfo fvi = {
> -            .counter = 0,
> -            .dispatch_tree = dispatch_tree,
> -            .owner = owner,
> -        };
> -        GArray *fv_address_spaces;
> -        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
> -        AccelClass *ac = ACCEL_GET_CLASS(current_accel());
> -
> -        if (ac->has_memory) {
> -            fvi.ac = ac;
> -        }
> -
> -        /* Gather all FVs in one table */
> -        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
> -            view = address_space_get_flatview(as);
> -
> -            fv_address_spaces = g_hash_table_lookup(views, view);
> -            if (!fv_address_spaces) {
> -                fv_address_spaces = g_array_new(false, false, sizeof(as));
> -                g_hash_table_insert(views, view, fv_address_spaces);
> -            }
> -
> -            g_array_append_val(fv_address_spaces, as);
> -        }
> -
> -        /* Print */
> -        g_hash_table_foreach(views, mtree_print_flatview, &fvi);
> -
> -        /* Free */
> -        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
> -        g_hash_table_unref(views);
> +        mtree_info_flatview(dispatch_tree, owner);
>  
>          return;
>      }
> 
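For readers unfamiliar with the glib idiom, the moved code follows a
gather/print/free grouping pattern. A minimal standalone sketch with
hypothetical toy keys (not QEMU code; only assumes glib):

/* Minimal sketch of the gather/print/free pattern used above,
 * with hypothetical toy keys (not QEMU code); assumes glib. */
#include <glib.h>
#include <stdio.h>

static void print_group(gpointer key, gpointer value, gpointer user_data)
{
    GArray *members = value;

    printf("key %p has %u member(s)\n", key, members->len);
}

static gboolean free_group(gpointer key, gpointer value, gpointer user_data)
{
    g_array_unref(value);
    return TRUE; /* tell foreach_remove to drop this entry */
}

int main(void)
{
    /* Toy "AS -> view" mapping: two items share key 1, one has key 2 */
    int keys[] = { 1, 2, 1 };
    GHashTable *groups = g_hash_table_new(g_direct_hash, g_direct_equal);

    /* Gather: group indices by key, creating one GArray per key */
    for (guint i = 0; i < G_N_ELEMENTS(keys); i++) {
        gpointer key = GINT_TO_POINTER(keys[i]);
        GArray *members = g_hash_table_lookup(groups, key);

        if (!members) {
            members = g_array_new(FALSE, FALSE, sizeof(guint));
            g_hash_table_insert(groups, key, members);
        }
        g_array_append_val(members, i);
    }

    /* Print, then free both the arrays and the table */
    g_hash_table_foreach(groups, print_group, NULL);
    g_hash_table_foreach_remove(groups, free_group, NULL);
    g_hash_table_unref(groups);

    return 0;
}

The key detail is that free_group() returns TRUE, so
g_hash_table_foreach_remove() drops each entry after its GArray is
unref'ed, matching the cleanup step in mtree_info_flatview().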



