[RFC PATCH 04/18] vdpa: move shadow_data to vhost_vdpa_shared
From: Eugenio Pérez
Subject: [RFC PATCH 04/18] vdpa: move shadow_data to vhost_vdpa_shared
Date: Thu, 19 Oct 2023 16:34:41 +0200
The next patches will register the vhost_vdpa memory listener at the
destination while the VM is still migrating, so the memory can be mapped to
the device before the VM is stopped at the source. The main goal is to reduce
downtime.

However, the destination QEMU does not know in advance which vhost_vdpa
device will register its memory_listener. If the source guest has CVQ
enabled, it will be the CVQ device; otherwise, it will be the first one.

Move the shadow_data member to VhostVDPAShared so every vhost_vdpa instance
can use it, rather than it always living in the first or last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
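As a reference for reviewers, this is the ownership pattern the change relies
on, condensed into a minimal standalone C sketch. The member names mirror the
real structs, but everything else here (the simplified types, the allocation
in main()) is an illustrative stand-in, not the actual QEMU definitions:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for VhostVDPAShared: state owned by the whole
     * vDPA device rather than by one virtqueue pair. */
    typedef struct VhostVDPAShared {
        /* Vdpa must send shadow addresses as IOTLB key for data queues */
        bool shadow_data;
    } VhostVDPAShared;

    /* Simplified stand-in for struct vhost_vdpa: one per virtqueue pair. */
    typedef struct VhostVDPA {
        bool shadow_vqs_enabled;  /* stays per queue pair */
        VhostVDPAShared *shared;  /* single instance, shared by all pairs */
    } VhostVDPA;

    int main(void)
    {
        /* The first queue pair allocates the shared struct ... */
        VhostVDPA v0 = { .shared = calloc(1, sizeof(VhostVDPAShared)) };
        /* ... and every other vhost_vdpa, including CVQ, points at it. */
        VhostVDPA cvq = { .shared = v0.shared };

        /* Whichever device registers the memory listener can set
         * shadow_data, and all the others observe the same value. */
        cvq.shared->shadow_data = true;
        printf("v0 sees shadow_data = %d\n", v0.shared->shadow_data);

        free(v0.shared);
        return 0;
    }

With this layout it no longer matters whether the CVQ device or the first
data device ends up registering the listener at the destination: both reach
the same shadow_data through v->shared.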
 include/hw/virtio/vhost-vdpa.h |  5 +++--
 hw/virtio/vhost-vdpa.c         |  6 +++---
 net/vhost-vdpa.c               | 23 ++++++-----------------
 3 files changed, 12 insertions(+), 22 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 8d52a7e498..01e0f25e27 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -36,6 +36,9 @@ typedef struct vhost_vdpa_shared {
 
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
+
+    /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
+    bool shadow_data;
 } VhostVDPAShared;
 
 typedef struct vhost_vdpa {
@@ -47,8 +50,6 @@ typedef struct vhost_vdpa {
     MemoryListener listener;
     uint64_t acked_features;
     bool shadow_vqs_enabled;
-    /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
-    bool shadow_data;
     /* Device suspended successfully */
     bool suspended;
     VhostVDPAShared *shared;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 2bceadd118..ec028e4c56 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -353,7 +353,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                          vaddr, section->readonly);
 
     llsize = int128_sub(llend, int128_make64(iova));
-    if (v->shadow_data) {
+    if (v->shared->shadow_data) {
         int r;
 
         mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
@@ -380,7 +380,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
     return;
 
 fail_map:
-    if (v->shadow_data) {
+    if (v->shared->shadow_data) {
         vhost_iova_tree_remove(v->shared->iova_tree, mem_region);
     }
 
@@ -435,7 +435,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
 
     llsize = int128_sub(llend, int128_make64(iova));
 
-    if (v->shadow_data) {
+    if (v->shared->shadow_data) {
         const DMAMap *result;
         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
             section->offset_within_region +
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 9648b0ef7e..01202350ea 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -282,15 +282,6 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
     return size;
 }
 
-/** From any vdpa net client, get the netclient of the first queue pair */
-static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
-{
-    NICState *nic = qemu_get_nic(s->nc.peer);
-    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
-
-    return DO_UPCAST(VhostVDPAState, nc, nc0);
-}
-
 static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
 {
     struct vhost_vdpa *v = &s->vhost_vdpa;
@@ -360,10 +351,10 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
     if (s->always_svq ||
         migration_is_setup_or_active(migrate_get_current()->state)) {
         v->shadow_vqs_enabled = true;
-        v->shadow_data = true;
+        v->shared->shadow_data = true;
     } else {
         v->shadow_vqs_enabled = false;
-        v->shadow_data = false;
+        v->shared->shadow_data = false;
     }
 
     if (v->index == 0) {
@@ -513,7 +504,7 @@ dma_map_err:
 
 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 {
-    VhostVDPAState *s, *s0;
+    VhostVDPAState *s;
     struct vhost_vdpa *v;
     int64_t cvq_group;
     int r;
@@ -524,12 +515,10 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
     s = DO_UPCAST(VhostVDPAState, nc, nc);
     v = &s->vhost_vdpa;
 
-    s0 = vhost_vdpa_net_first_nc_vdpa(s);
-    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
-    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
+    v->shadow_vqs_enabled = s->always_svq;
     s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
 
-    if (s->vhost_vdpa.shadow_data) {
+    if (v->shared->shadow_data) {
         /* SVQ is already configured for all virtqueues */
         goto out;
     }
@@ -1455,12 +1444,12 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->always_svq = svq;
     s->migration_state.notify = vdpa_net_migration_state_notifier;
     s->vhost_vdpa.shadow_vqs_enabled = svq;
-    s->vhost_vdpa.shadow_data = svq;
     if (queue_pair_index == 0) {
         vhost_vdpa_net_valid_svq_features(features,
                                           &s->vhost_vdpa.migration_blocker);
         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
         s->vhost_vdpa.shared->iova_range = iova_range;
+        s->vhost_vdpa.shared->shadow_data = svq;
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                      PROT_READ | PROT_WRITE,
--
2.39.3