Re: [PATCH v3 4/7] vdpa: Add asid parameter to vhost_vdpa_dma_map/unmap


From: Eugenio Perez Martin
Subject: Re: [PATCH v3 4/7] vdpa: Add asid parameter to vhost_vdpa_dma_map/unmap
Date: Thu, 4 Aug 2022 09:48:16 +0200

On Thu, Aug 4, 2022 at 6:36 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2022/8/4 01:18, Eugenio Pérez wrote:
> > So the caller can choose which ASID each map/unmap is destined to.
> >
> > No need to update the batch functions as they will always be called from
> > memory listener updates at the moment. Memory listener updates will
> > always update ASID 0, as it's the passthrough ASID.
> >
> > All vhost devices' ASIDs are 0 at this moment.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> > v3: Deleted unneeded space
> > ---
> >   include/hw/virtio/vhost-vdpa.h |  8 +++++---
> >   hw/virtio/vhost-vdpa.c         | 25 +++++++++++++++----------
> >   net/vhost-vdpa.c               |  6 +++---
> >   hw/virtio/trace-events         |  4 ++--
> >   4 files changed, 25 insertions(+), 18 deletions(-)
> >
> > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > index 1111d85643..6560bb9d78 100644
> > --- a/include/hw/virtio/vhost-vdpa.h
> > +++ b/include/hw/virtio/vhost-vdpa.h
> > @@ -29,6 +29,7 @@ typedef struct vhost_vdpa {
> >       int index;
> >       uint32_t msg_type;
> >       bool iotlb_batch_begin_sent;
> > +    uint32_t address_space_id;
> >       MemoryListener listener;
> >       struct vhost_vdpa_iova_range iova_range;
> >       uint64_t acked_features;
> > @@ -42,8 +43,9 @@ typedef struct vhost_vdpa {
> >       VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> >   } VhostVDPA;
> >
> > -int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> > -                       void *vaddr, bool readonly);
> > -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size);
> > +int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> > +                       hwaddr size, void *vaddr, bool readonly);
> > +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> > +                         hwaddr size);
> >
> >   #endif
> > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > index 2fefcc66ad..131100841c 100644
> > --- a/hw/virtio/vhost-vdpa.c
> > +++ b/hw/virtio/vhost-vdpa.c
> > @@ -72,22 +72,24 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> >       return false;
> >   }
> >
> > -int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> > -                       void *vaddr, bool readonly)
> > +int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> > +                       hwaddr size, void *vaddr, bool readonly)
> >   {
> >       struct vhost_msg_v2 msg = {};
> >       int fd = v->device_fd;
> >       int ret = 0;
> >
> >       msg.type = v->msg_type;
> > +    msg.asid = asid;
>
>
> I wonder what happens if we're running on a kernel without ASID support.
>
> Does it work because the asid will simply be ignored? Can we have a
> case where we want asid != 0 on an old kernel?
>

No, it should always be 0 in that case. I can set it conditionally if
you prefer.
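
Something like this, as a sketch (assuming the VHOST_BACKEND_F_IOTLB_ASID
feature bit from the kernel side of this work, and assuming the negotiated
backend features are stored somewhere reachable from struct vhost_vdpa --
that field is hypothetical here, not part of this patch):

    /*
     * Hypothetical guard: only fill msg.asid when the backend has
     * negotiated ASID support. Kernels without it had a reserved
     * field at this offset of struct vhost_msg_v2 and ignore it, so
     * leaving it zero-initialized is equivalent.
     */
    if (v->backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) {
        msg.asid = asid;
    }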

Thanks!

> Thanks
>
>
> >       msg.iotlb.iova = iova;
> >       msg.iotlb.size = size;
> >       msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
> >       msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
> >       msg.iotlb.type = VHOST_IOTLB_UPDATE;
> >
> > -    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
> > -                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
> > +    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
> > +                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
> > +                             msg.iotlb.type);
> >
> >       if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
> >           error_report("failed to write, fd=%d, errno=%d (%s)",
> > @@ -98,18 +100,20 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> >       return ret;
> >   }
> >
> > -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
> > +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> > +                         hwaddr size)
> >   {
> >       struct vhost_msg_v2 msg = {};
> >       int fd = v->device_fd;
> >       int ret = 0;
> >
> >       msg.type = v->msg_type;
> > +    msg.asid = asid;
> >       msg.iotlb.iova = iova;
> >       msg.iotlb.size = size;
> >       msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
> >
> > -    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
> > +    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
> >                                  msg.iotlb.size, msg.iotlb.type);
> >
> >       if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
> > @@ -229,7 +233,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> >       }
> >
> >       vhost_vdpa_iotlb_batch_begin_once(v);
> > -    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
> > +    ret = vhost_vdpa_dma_map(v, 0, iova, int128_get64(llsize),
> >                                vaddr, section->readonly);
> >       if (ret) {
> >           error_report("vhost vdpa map fail!");
> > @@ -299,7 +303,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> >           vhost_iova_tree_remove(v->iova_tree, result);
> >       }
> >       vhost_vdpa_iotlb_batch_begin_once(v);
> > -    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
> > +    ret = vhost_vdpa_dma_unmap(v, 0, iova, int128_get64(llsize));
> >       if (ret) {
> >           error_report("vhost_vdpa dma unmap error!");
> >       }
> > @@ -890,7 +894,7 @@ static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
> >       }
> >
> >       size = ROUND_UP(result->size, qemu_real_host_page_size());
> > -    r = vhost_vdpa_dma_unmap(v, result->iova, size);
> > +    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
> >       return r == 0;
> >   }
> >
> > @@ -932,7 +936,8 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
> >           return false;
> >       }
> >
> > -    r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
> > +    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
> > +                           needle->size + 1,
> >                              (void *)(uintptr_t)needle->translated_addr,
> >                              needle->perm == IOMMU_RO);
> >       if (unlikely(r != 0)) {
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index f933ba53a3..f96c3cb1da 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -239,7 +239,7 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> >           return;
> >       }
> >
> > -    r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
> > +    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
> >       if (unlikely(r != 0)) {
> >           error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
> >       }
> > @@ -288,8 +288,8 @@ static bool vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v,
> >           return false;
> >       }
> >
> > -    r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
> > -                           !write);
> > +    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
> > +                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
> >       if (unlikely(r < 0)) {
> >           goto dma_map_err;
> >       }
> > diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> > index 20af2e7ebd..36e5ae75f6 100644
> > --- a/hw/virtio/trace-events
> > +++ b/hw/virtio/trace-events
> > @@ -26,8 +26,8 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
> >   vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
> >
> >   # vhost-vdpa.c
> > -vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
> > -vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
> > +vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
> > +vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
> >   vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> >   vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> >   vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
>
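
For completeness, here is a sketch of how userspace could probe for ASID
support up front, so that address_space_id is only ever non-zero on
capable kernels. VHOST_VDPA_GET_BACKEND_FEATURES is the existing
vhost-vdpa ioctl; VHOST_BACKEND_F_IOTLB_ASID is the feature bit from the
kernel side of this work. An illustrative sketch only, not part of this
patch:

    uint64_t features;

    /* Ask the vdpa device which backend features it supports. */
    if (ioctl(v->device_fd, VHOST_VDPA_GET_BACKEND_FEATURES, &features) < 0 ||
        !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        /* No ASID support: all mappings must stay in address space 0. */
        v->address_space_id = 0;
    }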