From: Eugenio Pérez
Subject: [RFC PATCH v8 21/21] vdpa: Add x-cvq-svq
Date: Thu, 19 May 2022 21:13:06 +0200
This isolates the shadow CVQ in its own virtqueue group, assigning it a separate ASID from the data virtqueues.
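A usage sketch (not part of the patch): assuming a vhost-vdpa device node at /dev/vhost-vdpa-0 (placeholder path) and a multiqueue-capable virtio-net device, the option is expected to be enabled together with x-svq along these lines:

  -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,x-svq=on,x-cvq-svq=on \
  -device virtio-net-pci,netdev=vdpa0,mq=on,ctrl_vq=on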
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
qapi/net.json | 8 ++-
net/vhost-vdpa.c | 134 ++++++++++++++++++++++++++++++++++++++++++++---
2 files changed, 133 insertions(+), 9 deletions(-)
diff --git a/qapi/net.json b/qapi/net.json
index cd7a1b32fe..f5b047ae15 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -447,9 +447,12 @@
#
# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1)
# (default: false)
+# @x-cvq-svq: Start device with (experimental) shadow virtqueue in its own
+# virtqueue group. (Since 7.1)
+# (default: false)
#
# Features:
-# @unstable: Member @x-svq is experimental.
+# @unstable: Members @x-svq and @x-cvq-svq are experimental.
#
# Since: 5.1
##
@@ -457,7 +460,8 @@
'data': {
'*vhostdev': 'str',
'*queues': 'int',
- '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
+ '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] },
+ '*x-cvq-svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
##
# @NetdevVmnetHostOptions:
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index ef8c82f92e..ad006a2bf3 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -70,6 +70,30 @@ const int vdpa_feature_bits[] = {
VHOST_INVALID_FEATURE_BIT
};
+/** Supported device specific feature bits with SVQ */
+static const uint64_t vdpa_svq_device_features =
+ BIT_ULL(VIRTIO_NET_F_CSUM) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
+ BIT_ULL(VIRTIO_NET_F_MTU) |
+ BIT_ULL(VIRTIO_NET_F_MAC) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
+ BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
+ BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
+ BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
+ BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
+ BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
+ BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
+ BIT_ULL(VIRTIO_NET_F_STATUS) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
+ BIT_ULL(VIRTIO_NET_F_MQ) |
+ BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
+ BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
+ BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
+ BIT_ULL(VIRTIO_NET_F_STANDBY);
+
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -352,6 +376,17 @@ static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
return ret;
}
+static int vhost_vdpa_get_backend_features(int fd, uint64_t *features,
+ Error **errp)
+{
+ int ret = ioctl(fd, VHOST_GET_BACKEND_FEATURES, features);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "Fail to query backend features from vhost-vDPA device");
+ }
+ return ret;
+}
+
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
int *has_cvq, Error **errp)
{
@@ -385,16 +420,56 @@ static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
return 1;
}
+/**
+ * Check that the vdpa device can place the CVQ group in its own ASID (1)
+ *
+ * @vdpa_device_fd: Vdpa device fd
+ * @queue_pairs: Number of queue pairs of the device
+ * @errp: Error
+ */
+static int vhost_vdpa_check_cvq_svq(int vdpa_device_fd, int queue_pairs,
+ Error **errp)
+{
+ uint64_t backend_features;
+ unsigned num_as;
+ int r;
+
+ r = vhost_vdpa_get_backend_features(vdpa_device_fd, &backend_features,
+ errp);
+ if (unlikely(r)) {
+ return -1;
+ }
+
+ if (unlikely(!(backend_features & VHOST_BACKEND_F_IOTLB_ASID))) {
+ error_setg(errp, "Device without IOTLB_ASID feature");
+ return -1;
+ }
+
+ r = ioctl(vdpa_device_fd, VHOST_VDPA_GET_AS_NUM, &num_as);
+ if (unlikely(r)) {
+ error_setg_errno(errp, errno,
+ "Cannot retrieve number of supported ASs");
+ return -1;
+ }
+ if (unlikely(num_as < 2)) {
+ error_setg(errp, "Insufficient number of ASs (%u, min: 2)", num_as);
+ return -1;
+ }
+
+ return 0;
+}
+
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
NetClientState *peer, Error **errp)
{
const NetdevVhostVDPAOptions *opts;
+ struct vhost_vdpa_iova_range iova_range;
uint64_t features;
int vdpa_device_fd;
g_autofree NetClientState **ncs = NULL;
NetClientState *nc;
int queue_pairs, r, i, has_cvq = 0;
g_autoptr(VhostIOVATree) iova_tree = NULL;
+ ERRP_GUARD();
assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
opts = &netdev->u.vhost_vdpa;
@@ -419,14 +494,35 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
qemu_close(vdpa_device_fd);
return queue_pairs;
}
- if (opts->x_svq) {
- struct vhost_vdpa_iova_range iova_range;
+ if (opts->x_cvq_svq || opts->x_svq) {
+ vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
+
+ uint64_t invalid_dev_features =
+ features & ~vdpa_svq_device_features &
+ /* Transport features are all accepted at this point */
+ ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
+ VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
- if (has_cvq) {
- error_setg(errp, "vdpa svq does not work with cvq");
+ if (invalid_dev_features) {
+ error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
+ invalid_dev_features);
goto err_svq;
}
- vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
+ }
+
+ if (opts->x_cvq_svq) {
+ if (!has_cvq) {
+ error_setg(errp, "Cannot use x-cvq-svq with a device without cvq");
+ goto err_svq;
+ }
+
+ r = vhost_vdpa_check_cvq_svq(vdpa_device_fd, queue_pairs, errp);
+ if (unlikely(r)) {
+ error_prepend(errp, "Cannot configure CVQ SVQ: ");
+ goto err_svq;
+ }
+ }
+ if (opts->x_svq) {
iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
}
@@ -441,11 +537,35 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
}
if (has_cvq) {
+ g_autoptr(VhostIOVATree) cvq_iova_tree = NULL;
+
+ if (opts->x_cvq_svq) {
+ cvq_iova_tree = vhost_iova_tree_new(iova_range.first,
+ iova_range.last);
+ } else if (opts->x_svq) {
+ cvq_iova_tree = vhost_iova_tree_acquire(iova_tree);
+ }
+
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
- vdpa_device_fd, i, 1, false, opts->x_svq,
- iova_tree);
+ vdpa_device_fd, i, 1, false,
+ opts->x_cvq_svq || opts->x_svq,
+ cvq_iova_tree);
if (!nc)
goto err;
+
+ if (opts->x_cvq_svq) {
+ struct vhost_vring_state asid = {
+ .index = 1,
+ .num = 1,
+ };
+
+ r = ioctl(vdpa_device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
+ if (unlikely(r)) {
+ error_setg_errno(errp, errno,
+ "Cannot set cvq group independent asid");
+ goto err;
+ }
+ }
}
return 0;
--
2.27.0