
Re: [PATCH v3 1/2] vdpa: Restore hash calculation state


From: Michael S. Tsirkin
Subject: Re: [PATCH v3 1/2] vdpa: Restore hash calculation state
Date: Sun, 22 Oct 2023 06:00:33 -0400

On Sun, Oct 22, 2023 at 10:00:48AM +0800, Hawkins Jiawei wrote:
> This patch introduces vhost_vdpa_net_load_rss() to restore
> the hash calculation state at device's startup.
> 
> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
> ---
> v3:
>   - remove the `do_rss` argument in vhost_vdpa_net_load_rss()
>   - zero reserved fields in "cfg" manually instead of using memset()
> to prevent compiler "array-bounds" warning
> 
> v2: 
> https://lore.kernel.org/all/f5ffad10699001107022851e0560cb394039d6b0.1693297766.git.yin31149@gmail.com/
>   - resolve conflict with updated patch
> "vdpa: Send all CVQ state load commands in parallel"
>   - move the `table` declaration at the beginning of the
> vhost_vdpa_net_load_rss()
> 
> RFC: 
> https://lore.kernel.org/all/a54ca70b12ebe2f3c391864e41241697ab1aba30.1691762906.git.yin31149@gmail.com/
> 
>  net/vhost-vdpa.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 89 insertions(+)
> 
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 4b7c3b81b8..2e4bad65b4 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -817,6 +817,86 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
>      return 0;
>  }
>  
> +static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
> +                                   struct iovec *out_cursor,
> +                                   struct iovec *in_cursor)
> +{
> +    struct virtio_net_rss_config cfg;
> +    ssize_t r;
> +    g_autofree uint16_t *table = NULL;
> +
> +    /*
> +     * According to VirtIO standard, "Initially the device has all hash
> +     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
> +     *
> +     * Therefore, there is no need to send this CVQ command if the
> +     * driver disable the all hash types, which aligns with

disables? or disabled

> +     * the device's defaults.
> +     *
> +     * Note that the device's defaults can mismatch the driver's
> +     * configuration only at live migration.
> +     */
> +    if (!n->rss_data.enabled ||
> +        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
> +        return 0;
> +    }
> +
> +    table = g_malloc_n(n->rss_data.indirections_len,
> +                       sizeof(n->rss_data.indirections_table[0]));
> +    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);
> +
> +    /*
> +     * According to VirtIO standard, "Field reserved MUST contain zeroes.
> +     * It is defined to make the structure to match the layout of
> +     * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
> +     *
> +     * Therefore, we need to zero the fields in struct virtio_net_rss_config,
> +     * which corresponds the `reserved` field in

corresponds to

> +     * struct virtio_net_hash_config.
> +     */
> +    cfg.indirection_table_mask = 0;
> +    cfg.unclassified_queue = 0;
> +    table[0] = 0; /* the actual indirection table for cfg */
> +    cfg.max_tx_vq = 0;

Wouldn't it be easier to just do cfg = {} where it is defined?
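
A minimal sketch of what that could look like (untested; this assumes the
empty initializer does not retrigger the "array-bounds" warning that the
v3 changelog says memset() produced):

    struct virtio_net_rss_config cfg = {};
    /* g_malloc0_n() zeroes the table, making table[0] = 0 redundant too */
    g_autofree uint16_t *table =
        g_malloc0_n(n->rss_data.indirections_len,
                    sizeof(n->rss_data.indirections_table[0]));

    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);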

> +
> +    /*
> +     * Consider that virtio_net_handle_rss() currently does not restore the
> +     * hash key length parsed from the CVQ command sent from the guest into
> +     * n->rss_data and uses the maximum key length in other code, so we also
> +     * employthe the maxium key length here.

two typos

> +     */
> +    cfg.hash_key_length = sizeof(n->rss_data.key);
> +
> +    const struct iovec data[] = {
> +        {
> +            .iov_base = &cfg,
> +            .iov_len = offsetof(struct virtio_net_rss_config,
> +                                indirection_table),
> +        }, {
> +            .iov_base = table,
> +            .iov_len = n->rss_data.indirections_len *
> +                       sizeof(n->rss_data.indirections_table[0]),
> +        }, {
> +            .iov_base = &cfg.max_tx_vq,
> +            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
> +                       offsetof(struct virtio_net_rss_config, max_tx_vq),
> +        }, {
> +            .iov_base = (void *)n->rss_data.key,

cast to void * should not be needed here.

> +            .iov_len = sizeof(n->rss_data.key),
> +        }
> +    };
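
For reference (an aside, not part of the patch): the two wire formats
that the comments above refer to are defined in the spec roughly as

    struct virtio_net_hash_config {
        le32 hash_types;
        le16 reserved[4];
        u8 hash_key_length;
        u8 hash_key_data[hash_key_length];
    };

    struct virtio_net_rss_config {        /* 5.1.6.5.7 */
        le32 hash_types;
        le16 indirection_table_mask;
        le16 unclassified_queue;
        le16 indirection_table[indirection_table_length];
        le16 max_tx_vq;
        u8 hash_key_length;
        u8 hash_key_data[hash_key_length];
    };

so the four iovec entries concatenate into the virtio_net_rss_config
layout, whose extra fields occupy exactly the bytes that
virtio_net_hash_config keeps reserved.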
> +
> +    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
> +                                VIRTIO_NET_CTRL_MQ,
> +                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
> +                                data, ARRAY_SIZE(data));
> +    if (unlikely(r < 0)) {
> +        return r;
> +    }
> +
> +    return 0;
> +}
> +
>  static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
>                                    const VirtIONet *n,
>                                    struct iovec *out_cursor,
> @@ -842,6 +922,15 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
>          return r;
>      }
>  
> +    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_HASH_REPORT)) {
> +        return 0;
> +    }
> +
> +    r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor);
> +    if (unlikely(r < 0)) {
> +        return r;
> +    }
> +
>      return 0;
>  }
>  
> -- 
> 2.25.1



