From: Jiang Wang
Subject: Re: [RFC v6] virtio/vsock: add two more queues for datagram types
Date: Wed, 15 Sep 2021 20:59:17 -0700

On Tue, Sep 14, 2021 at 5:46 AM Stefan Hajnoczi <stefanha@redhat.com> wrote:
>
> On Mon, Sep 13, 2021 at 10:18:43PM +0000, Jiang Wang wrote:
> > Datagram sockets are connectionless and unreliable.
> > The sender does not know the capacity of the receiver
> > and may send more packets than the receiver can handle.
> >
> > Add two more dedicated virtqueues for datagram sockets,
> > so that it will not unfairly steal resources from
> > stream and future connection-oriented sockets.
> >
> > Signed-off-by: Jiang Wang <jiang.wang@bytedance.com>
> > ---
> > v1 -> v2: use qemu cmd option to control number of queues,
> >         removed configuration settings for dgram.
> > v2 -> v3: use ioctl to get features and decide number of
> >         virt queues, instead of qemu cmd option.
> > v3 -> v4: change DGRAM feature bit value to 2. Add an argument
> >         in vhost_vsock_common_realize to indicate dgram is supported or not.
> > v4 -> v5: don't open dev to get vhostfd. Removed leftover definition of
> >         enable_dgram
> > v5 -> v6: fix style errors. Improve error handling of
> >         vhost_vsock_dgram_supported. Rename MAX_VQS_WITH_DGRAM and another one.
> >
> >  hw/virtio/vhost-user-vsock.c                  |  2 +-
> >  hw/virtio/vhost-vsock-common.c                | 25 ++++++++++++--
> >  hw/virtio/vhost-vsock.c                       | 34 ++++++++++++++++++-
> >  include/hw/virtio/vhost-vsock-common.h        |  6 ++--
> >  include/hw/virtio/vhost-vsock.h               |  3 ++
> >  include/standard-headers/linux/virtio_vsock.h |  1 +
> >  6 files changed, 64 insertions(+), 7 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
> > index 6095ed7349..e9ec0e1c00 100644
> > --- a/hw/virtio/vhost-user-vsock.c
> > +++ b/hw/virtio/vhost-user-vsock.c
> > @@ -105,7 +105,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp)
> >          return;
> >      }
> >
> > -    vhost_vsock_common_realize(vdev, "vhost-user-vsock");
> > +    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);
>
> VIRTIO_VSOCK_F_DGRAM support should work equally well for
> vhost-user-vsock. I don't think there is a need to disable it here.
>
Stefano mentioned in a previous email (v3) that I can disable dgram
for vhost-user-vsock for now. I think we need some extra changes to
fully support vhost-user-vsock, like feature discovery?
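
If I do enable it there later, my understanding is that the main change
would be advertising the bit in the vhost-user feature list, since the
generic vhost-user code already queries the backend's features during
vhost_dev_init(). A rough sketch of what I have in mind (untested, and
the array name is my assumption):

    static const int user_feature_bits[] = {
        VIRTIO_VSOCK_F_SEQPACKET,
        VIRTIO_VSOCK_F_DGRAM,     /* masked out if the backend lacks it */
        VHOST_INVALID_FEATURE_BIT
    };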

> >
> >      vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops);
> >
> > diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> > index 4ad6e234ad..d94636e04e 100644
> > --- a/hw/virtio/vhost-vsock-common.c
> > +++ b/hw/virtio/vhost-vsock-common.c
> > @@ -17,6 +17,8 @@
> >  #include "hw/virtio/vhost-vsock.h"
> >  #include "qemu/iov.h"
> >  #include "monitor/monitor.h"
> > +#include <sys/ioctl.h>
> > +#include <linux/vhost.h>
> >
> >  int vhost_vsock_common_start(VirtIODevice *vdev)
> >  {
> > @@ -196,9 +198,11 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >      return 0;
> >  }
> >
> > -void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> > +void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name,
> > +                               bool enable_dgram)
> >  {
> >      VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> > +    int nvqs = VHOST_VSOCK_NVQS;
> >
> >      virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >                  sizeof(struct virtio_vsock_config));
> > @@ -209,12 +213,20 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >      vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                         vhost_vsock_common_handle_output);
> >
> > +    if (enable_dgram) {
> > +        nvqs = VHOST_VSOCK_NVQS_DGRAM;
> > +        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> > +                                              vhost_vsock_common_handle_output);
> > +        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> > +                                               vhost_vsock_common_handle_output);
> > +    }
>
> I think the virtqueues can be added unconditionally.
>
OK.
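
If I drop the condition, the realize path would always create them,
e.g. (a sketch using this patch's names):

    /* Always create the dgram virtqueues; the driver only uses them
     * when VIRTIO_VSOCK_F_DGRAM is negotiated. */
    vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                          vhost_vsock_common_handle_output);
    vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                           vhost_vsock_common_handle_output);

and vhost_vsock_common_unrealize() would then delete them
unconditionally as well.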
> > +
> >      /* The event queue belongs to QEMU */
> >      vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                         vhost_vsock_common_handle_output);
> >
> > -    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
> > -    vvc->vhost_dev.vqs = vvc->vhost_vqs;
> > +    vvc->vhost_dev.nvqs = nvqs;
> > +    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
>
> IIUC the number of virtqueues needs to be the maximum supported number.
> It's okay to have more virtqueues than needed. The feature bit is used
> by the driver to detect the device's support for dgrams, not the number
> of virtqueues.
>
OK. I can make these changes. But I am not able to test vhost-user-vsock
right now. I tried to follow the steps here:
https://patchew.org/QEMU/20200515152110.202917-1-sgarzare@redhat.com/
but the git repo for vhost-user-vsock is somewhat broken. I tried to
fix it, but I am new to Rust, so it will take some time. Is there an
updated or easier way to test vhost-user-vsock?
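
With the queues always present, sizing the vhost state for the maximum
layout would reduce the allocation to something like this (sketch):

    /* Reserve vhost state for the largest layout; the extra virtqueues
     * are harmless because the driver never uses them unless
     * VIRTIO_VSOCK_F_DGRAM is negotiated. */
    vvc->vhost_dev.nvqs = VHOST_VSOCK_NVQS_DGRAM;
    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);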


> >
> >      vvc->post_load_timer = NULL;
> >  }
> > @@ -227,6 +239,13 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
> >
> >      virtio_delete_queue(vvc->recv_vq);
> >      virtio_delete_queue(vvc->trans_vq);
> > +    if (vvc->vhost_dev.nvqs == VHOST_VSOCK_NVQS_DGRAM) {
> > +        virtio_delete_queue(vvc->dgram_recv_vq);
> > +        virtio_delete_queue(vvc->dgram_trans_vq);
> > +    }
> > +
> > +    g_free(vvc->vhost_dev.vqs);
> > +
> >      virtio_delete_queue(vvc->event_vq);
> >      virtio_cleanup(vdev);
> >  }
> > diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
> > index 1b1a5c70ed..891d38e226 100644
> > --- a/hw/virtio/vhost-vsock.c
> > +++ b/hw/virtio/vhost-vsock.c
> > @@ -20,9 +20,12 @@
> >  #include "hw/qdev-properties.h"
> >  #include "hw/virtio/vhost-vsock.h"
> >  #include "monitor/monitor.h"
> > +#include <sys/ioctl.h>
> > +#include <linux/vhost.h>
> >
> >  const int feature_bits[] = {
> >      VIRTIO_VSOCK_F_SEQPACKET,
> > +    VIRTIO_VSOCK_F_DGRAM,
>
> Stefano is currently working on a way to control live migration
> compatibility when features like seqpacket or dgram aren't available. He
> can suggest how to do this for dgram.
>
Yes, I watched that email thread. I can make similar changes for
DGRAM in the next version.
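
If it ends up mirroring the seqpacket work, I would guess at an
on/off/auto device property that lets older machine types force the
feature off. A sketch only -- the "dgram" property name and the
conf.dgram field are made up here:

    /* Hypothetical compatibility knob, modeled on "seqpacket". */
    DEFINE_PROP_ON_OFF_AUTO("dgram", VHostVSock, conf.dgram,
                            ON_OFF_AUTO_AUTO),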

> >      VHOST_INVALID_FEATURE_BIT
> >  };
> >
> > @@ -116,6 +119,9 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
> >      VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >
> >      virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
> > +    if (vvc->vhost_dev.nvqs == VHOST_VSOCK_NVQS_DGRAM) {
> > +        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
> > +    }
> >      return vhost_get_features(&vvc->vhost_dev, feature_bits,
> >                                  requested_features);
> >  }
> > @@ -132,13 +138,34 @@ static const VMStateDescription vmstate_virtio_vhost_vsock = {
> >      .post_load = vhost_vsock_common_post_load,
> >  };
> >
> > +static bool vhost_vsock_dgram_supported(int vhostfd, Error **errp)
> > +{
> > +    uint64_t features;
> > +    int ret;
> > +
> > +    ret = ioctl(vhostfd, VHOST_GET_FEATURES, &features);
> > +    if (ret) {
> > +        error_setg(errp, "vhost-vsock: failed to read device features. %s",
> > +                     strerror(errno));
> > +        return false;
> > +    }
>
> ioctl(2) shouldn't be necessary. Let vhost detect features from the
> device backend (vhost kernel or vhost-user) and don't worry about
> conditionally adding the exact number of virtqueues.

I did not get this part. What is the difference between vhost and the
device backend? I thought vhost was the device backend: vhost kernel is
one kind of backend and vhost-user is another. Could you explain more?
Thanks.
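
For context, here is roughly how I read the masking helper in
hw/virtio/vhost.c (paraphrased from memory, so treat it as a sketch
rather than the exact code). If this is right, maybe my confusion is
only about terminology:

    uint64_t vhost_get_features(struct vhost_dev *hdev,
                                const int *feature_bits, uint64_t features)
    {
        const int *bit = feature_bits;

        /* Clear every listed feature bit that the backend did not
         * advertise; hdev->features is filled in by vhost_dev_init()
         * from the backend (kernel vhost or vhost-user). */
        while (*bit != VHOST_INVALID_FEATURE_BIT) {
            uint64_t bit_mask = (1ULL << *bit);
            if (!(hdev->features & bit_mask)) {
                features &= ~bit_mask;
            }
            bit++;
        }
        return features;
    }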
>
> > +
> > +    if (features & (1 << VIRTIO_VSOCK_F_DGRAM)) {
> > +        return true;
> > +    }
> > +
> > +    return false;
> > +}
> > +
> >  static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
> >  {
> > +    ERRP_GUARD();
> >      VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(dev);
> >      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >      VHostVSock *vsock = VHOST_VSOCK(dev);
> >      int vhostfd;
> >      int ret;
> > +    bool enable_dgram;
> >
> >      /* Refuse to use reserved CID numbers */
> >      if (vsock->conf.guest_cid <= 2) {
> > @@ -175,7 +202,11 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
> >          qemu_set_nonblock(vhostfd);
> >      }
> >
> > -    vhost_vsock_common_realize(vdev, "vhost-vsock");
> > +    enable_dgram = vhost_vsock_dgram_supported(vhostfd, errp);
> > +    if (*errp) {
> > +        goto err_dgram;
> > +    }
> > +    vhost_vsock_common_realize(vdev, "vhost-vsock", enable_dgram);
> >
> >      ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd,
> >                           VHOST_BACKEND_TYPE_KERNEL, 0, errp);
> > @@ -197,6 +228,7 @@ err_vhost_dev:
> >      vhostfd = -1;
> >  err_virtio:
> >      vhost_vsock_common_unrealize(vdev);
> > +err_dgram:
> >      if (vhostfd >= 0) {
> >          close(vhostfd);
> >      }
> > diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
> > index e412b5ee98..80151aee35 100644
> > --- a/include/hw/virtio/vhost-vsock-common.h
> > +++ b/include/hw/virtio/vhost-vsock-common.h
> > @@ -27,12 +27,13 @@ enum {
> >  struct VHostVSockCommon {
> >      VirtIODevice parent;
> >
> > -    struct vhost_virtqueue vhost_vqs[2];
> >      struct vhost_dev vhost_dev;
> >
> >      VirtQueue *event_vq;
> >      VirtQueue *recv_vq;
> >      VirtQueue *trans_vq;
> > +    VirtQueue *dgram_recv_vq;
> > +    VirtQueue *dgram_trans_vq;
> >
> >      QEMUTimer *post_load_timer;
> >  };
> > @@ -41,7 +42,8 @@ int vhost_vsock_common_start(VirtIODevice *vdev);
> >  void vhost_vsock_common_stop(VirtIODevice *vdev);
> >  int vhost_vsock_common_pre_save(void *opaque);
> >  int vhost_vsock_common_post_load(void *opaque, int version_id);
> > -void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name);
> > +void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name,
> > +                                bool enable_dgram);
> >  void vhost_vsock_common_unrealize(VirtIODevice *vdev);
> >
> >  #endif /* _QEMU_VHOST_VSOCK_COMMON_H */
> > diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
> > index 84f4e727c7..87b2019b5a 100644
> > --- a/include/hw/virtio/vhost-vsock.h
> > +++ b/include/hw/virtio/vhost-vsock.h
> > @@ -33,4 +33,7 @@ struct VHostVSock {
> >      /*< public >*/
> >  };
> >
> > +#define VHOST_VSOCK_NVQS 2
> > +#define VHOST_VSOCK_NVQS_DGRAM 4
> > +
> >  #endif /* QEMU_VHOST_VSOCK_H */
> > diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
> > index 3a23488e42..7e35acf3d4 100644
> > --- a/include/standard-headers/linux/virtio_vsock.h
> > +++ b/include/standard-headers/linux/virtio_vsock.h
> > @@ -40,6 +40,7 @@
> >
> >  /* The feature bitmap for virtio vsock */
> >  #define VIRTIO_VSOCK_F_SEQPACKET     1       /* SOCK_SEQPACKET supported */
> > +#define VIRTIO_VSOCK_F_DGRAM         2       /* SOCK_DGRAM supported */
> >
> >  struct virtio_vsock_config {
> >       uint64_t guest_cid;
> > --
> > 2.20.1
> >


