Subject: Re: [PATCH v4 01/15] vdpa net: move iova tree creation from init to start
From: Eugenio Perez Martin
Date: Wed, 1 Mar 2023 08:01:25 +0100
On Mon, Feb 27, 2023 at 8:04 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2023/2/24 23:54, Eugenio Pérez wrote:
> > Only create iova_tree if and when it is needed.
> >
> > The cleanup is still the responsibility of the last VQ, but this change
> > allows both cleanup functions to be merged.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > Acked-by: Jason Wang <jasowang@redhat.com>
> > ---
> > v4:
> > * Remove leak of iova_tree because double allocation
> > * Document better the sharing of IOVA tree between data and CVQ
> > ---
> > net/vhost-vdpa.c | 113 ++++++++++++++++++++++++++++++++++-------------
> > 1 file changed, 83 insertions(+), 30 deletions(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index de5ed8ff22..b89c99066a 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -178,13 +178,9 @@ err_init:
> > static void vhost_vdpa_cleanup(NetClientState *nc)
> > {
> > VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> > - struct vhost_dev *dev = &s->vhost_net->dev;
> >
> > qemu_vfree(s->cvq_cmd_out_buffer);
> > qemu_vfree(s->status);
> > - if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
> > - g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> > - }
> > if (s->vhost_net) {
> > vhost_net_cleanup(s->vhost_net);
> > g_free(s->vhost_net);
> > @@ -234,10 +230,64 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
> > return size;
> > }
> >
> > +/** From any vdpa net client, get the netclient of first queue pair */
> > +static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
> > +{
> > + NICState *nic = qemu_get_nic(s->nc.peer);
> > + NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
> > +
> > + return DO_UPCAST(VhostVDPAState, nc, nc0);
> > +}
> > +
> > +static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
> > +{
> > + struct vhost_vdpa *v = &s->vhost_vdpa;
> > +
> > + if (v->shadow_vqs_enabled) {
> > + v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
> > + v->iova_range.last);
> > + }
> > +}
> > +
> > +static int vhost_vdpa_net_data_start(NetClientState *nc)
> > +{
> > + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> > + struct vhost_vdpa *v = &s->vhost_vdpa;
> > +
> > + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> > +
> > + if (v->index == 0) {
> > + vhost_vdpa_net_data_start_first(s);
> > + return 0;
> > + }
> > +
> > + if (v->shadow_vqs_enabled) {
> > + VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
> > + v->iova_tree = s0->vhost_vdpa.iova_tree;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static void vhost_vdpa_net_client_stop(NetClientState *nc)
> > +{
> > + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> > + struct vhost_dev *dev;
> > +
> > + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> > +
> > + dev = s->vhost_vdpa.dev;
> > + if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
> > + g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> > + }
> > +}
> > +
> > static NetClientInfo net_vhost_vdpa_info = {
> > .type = NET_CLIENT_DRIVER_VHOST_VDPA,
> > .size = sizeof(VhostVDPAState),
> > .receive = vhost_vdpa_receive,
> > + .start = vhost_vdpa_net_data_start,
> > + .stop = vhost_vdpa_net_client_stop,
>
>
> Looking at the implementation, there seems to be nothing net-specific; any
> reason we can't simply use vhost_vdpa_dev_start()?
>
The IOVA tree must be shared between (at least) all dataplane vhost_vdpa
instances. How could we move the call to vhost_vdpa_net_first_nc_vdpa to
vhost_vdpa_dev_start?
A possibility is to always allocate the tree just in case, but it seems
better to me not to allocate resources speculatively :).
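
To make the lifecycle concrete, here is a minimal standalone sketch of
that sharing scheme (struct client, tree_new and tree_delete are invented
stand-ins for this sketch, not QEMU APIs): the first dataplane client
allocates the tree on start, later clients borrow the same pointer, and
the client covering the last VQ frees it on stop.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct iova_tree { int dummy; };

static struct iova_tree *tree_new(void)
{
    return calloc(1, sizeof(struct iova_tree));
}

static void tree_delete(struct iova_tree *t)
{
    free(t);
}

struct client {
    int index;              /* 0 == first queue pair, as in v->index */
    int is_last;            /* stands in for the vq_index_end check */
    struct iova_tree *tree; /* shared by all dataplane clients */
};

static void client_start(struct client *c, struct client *first)
{
    if (c->index == 0) {
        c->tree = tree_new();   /* only the first client allocates */
    } else {
        c->tree = first->tree;  /* the rest borrow the same tree */
    }
}

static void client_stop(struct client *c)
{
    if (c->is_last) {
        tree_delete(c->tree);   /* freed exactly once, by the last VQ owner */
    }
    c->tree = NULL;
}

int main(void)
{
    struct client c0 = { .index = 0, .is_last = 0, .tree = NULL };
    struct client c1 = { .index = 1, .is_last = 1, .tree = NULL };

    client_start(&c0, &c0);
    client_start(&c1, &c0);
    assert(c0.tree == c1.tree);  /* one tree shared across clients */

    client_stop(&c0);
    client_stop(&c1);
    printf("tree allocated lazily on start, freed once on stop\n");
    return 0;
}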
>
> > .cleanup = vhost_vdpa_cleanup,
> > .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
> > .has_ufo = vhost_vdpa_has_ufo,
> > @@ -351,7 +401,7 @@ dma_map_err:
> >
> > static int vhost_vdpa_net_cvq_start(NetClientState *nc)
> > {
> > - VhostVDPAState *s;
> > + VhostVDPAState *s, *s0;
> > struct vhost_vdpa *v;
> > uint64_t backend_features;
> > int64_t cvq_group;
> > @@ -415,8 +465,6 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
> > return r;
> > }
> >
> > - v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
> > - v->iova_range.last);
> > v->shadow_vqs_enabled = true;
> > s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
> >
> > @@ -425,6 +473,27 @@ out:
> > return 0;
> > }
> >
> > + s0 = vhost_vdpa_net_first_nc_vdpa(s);
> > + if (s0->vhost_vdpa.iova_tree) {
> > + /*
> > + * SVQ is already configured for all virtqueues. Reuse IOVA tree for
> > + * simplicity, wether CVQ shares ASID with guest or not, because:
>
>
> Typo, should be "whether", or "regardless of whether" (not a native speaker).
>
Good catch, I can fix it in the next version.
Thanks!
> Otherwise looks good.
>
> Thanks
>
>
> > + * - Memory listener need access to guest's memory addresses allocated
> > + * in the IOVA tree.
> > + * - There should be plenty of IOVA address space for both ASID not to
> > + * worry about collisions between them. Guest's translations are
> > + * still validated with virtio virtqueue_pop so there is no risk for
> > + * the guest to access memory it shouldn't.
> > + *
> > + * To allocate a iova tree per ASID is doable but it complicates the
> > + * code and it is not worth for the moment.
> > + */
> > + v->iova_tree = s0->vhost_vdpa.iova_tree;
> > + } else {
> > + v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
> > + v->iova_range.last);
> > + }
> > +
> > r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
> > vhost_vdpa_net_cvq_cmd_page_len(), false);
> > if (unlikely(r < 0)) {
> > @@ -449,15 +518,9 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
> > if (s->vhost_vdpa.shadow_vqs_enabled) {
> > vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
> > vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
> > - if (!s->always_svq) {
> > - /*
> > - * If only the CVQ is shadowed we can delete this safely.
> > - * If all the VQs are shadows this will be needed by the time the
> > - * device is started again to register SVQ vrings and similar.
> > - */
> > - g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> > - }
> > }
> > +
> > + vhost_vdpa_net_client_stop(nc);
> > }
> >
> > static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> > @@ -667,8 +730,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> > int nvqs,
> > bool is_datapath,
> > bool svq,
> > - struct vhost_vdpa_iova_range iova_range,
> > - VhostIOVATree *iova_tree)
> > + struct vhost_vdpa_iova_range iova_range)
> > {
> > NetClientState *nc = NULL;
> > VhostVDPAState *s;
> > @@ -690,7 +752,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> > s->vhost_vdpa.shadow_vqs_enabled = svq;
> > s->vhost_vdpa.iova_range = iova_range;
> > s->vhost_vdpa.shadow_data = svq;
> > - s->vhost_vdpa.iova_tree = iova_tree;
> > if (!is_datapath) {
> > s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
> > vhost_vdpa_net_cvq_cmd_page_len());
> > @@ -760,7 +821,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> > uint64_t features;
> > int vdpa_device_fd;
> > g_autofree NetClientState **ncs = NULL;
> > - g_autoptr(VhostIOVATree) iova_tree = NULL;
> > struct vhost_vdpa_iova_range iova_range;
> > NetClientState *nc;
> > int queue_pairs, r, i = 0, has_cvq = 0;
> > @@ -812,12 +872,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> > goto err;
> > }
> >
> > - if (opts->x_svq) {
> > - if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
> > - goto err_svq;
> > - }
> > -
> > - iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
> > + if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
> > + goto err;
> > }
> >
> > ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
> > @@ -825,7 +881,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> > for (i = 0; i < queue_pairs; i++) {
> > ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> > vdpa_device_fd, i, 2, true, opts->x_svq,
> > - iova_range, iova_tree);
> > + iova_range);
> > if (!ncs[i])
> > goto err;
> > }
> > @@ -833,13 +889,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> > if (has_cvq) {
> > nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> > vdpa_device_fd, i, 1, false,
> > - opts->x_svq, iova_range, iova_tree);
> > + opts->x_svq, iova_range);
> > if (!nc)
> > goto err;
> > }
> >
> > - /* iova_tree ownership belongs to last NetClientState */
> > - g_steal_pointer(&iova_tree);
> > return 0;
> >
> > err:
> > @@ -849,7 +903,6 @@ err:
> > }
> > }
> >
> > -err_svq:
> > qemu_close(vdpa_device_fd);
> >
> > return -1;
>