[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v4 8/8] vdpa: Send cvq state load commands in parallel
From: Hawkins Jiawei
Subject: Re: [PATCH v4 8/8] vdpa: Send cvq state load commands in parallel
Date: Sun, 8 Oct 2023 10:24:02 +0800
在 2023/10/4 15:33, Eugenio Perez Martin 写道:
> On Tue, Aug 29, 2023 at 7:55 AM Hawkins Jiawei <yin31149@gmail.com> wrote:
>>
>> This patch enables sending CVQ state load commands
>> in parallel at device startup by following steps:
>>
>> * Refactor vhost_vdpa_net_load_cmd() to iterate through
>> the control commands shadow buffers. This allows different
>> CVQ state load commands to use their own unique buffers.
>>
>> * Delay the polling and checking of buffers until either
>> the SVQ is full or control commands shadow buffers are full.
>>
>> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1578
>> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
>> ---
>> v4:
>> - refactor argument `cmds_in_flight` to `len` for
>> vhost_vdpa_net_svq_full()
>> - check the return value of vhost_vdpa_net_svq_poll()
>> in vhost_vdpa_net_svq_flush() suggested by Eugenio
>> - use iov_size(), vhost_vdpa_net_load_cursor_reset()
>> and iov_discard_front() to update the cursors instead of
>> accessing it directly according to Eugenio
>>
>> v3:
>> https://lore.kernel.org/all/3a002790e6c880af928c6470ecbf03e7c65a68bb.1689748694.git.yin31149@gmail.com/
>>
>> net/vhost-vdpa.c | 155 +++++++++++++++++++++++++++++------------------
>> 1 file changed, 97 insertions(+), 58 deletions(-)
>>
>> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
>> index a71e8c9090..818464b702 100644
>> --- a/net/vhost-vdpa.c
>> +++ b/net/vhost-vdpa.c
>> @@ -646,6 +646,31 @@ static void
>> vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
>> in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
>> }
>>
>> +/*
>> + * Poll SVQ for multiple pending control commands and check the device's
>> ack.
>> + *
>> + * Caller should hold the BQL when invoking this function.
>> + *
>> + * @s: The VhostVDPAState
>> + * @len: The length of the pending status shadow buffer
>> + */
>> +static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
>> +{
>> + /* Device uses a one-byte length ack for each control command */
>> + ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
>> + if (unlikely(dev_written != len)) {
>> + return -EIO;
>> + }
>> +
>> + /* check the device's ack */
>> + for (int i = 0; i < len; ++i) {
>> + if (s->status[i] != VIRTIO_NET_OK) {
>> + return -EIO;
>> + }
>> + }
>> + return 0;
>> +}
>> +
>> static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
>> struct iovec *out_cursor,
>> struct iovec *in_cursor, uint8_t
>> class,
>> @@ -660,10 +685,30 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState
>> *s,
>> cmd_size = sizeof(ctrl) + data_size;
>> struct iovec out, in;
>> ssize_t r;
>> + unsigned dummy_cursor_iov_cnt;
>>
>> assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
>> + if (vhost_vdpa_net_svq_available_slots(s) < 2 ||
>> + iov_size(out_cursor, 1) < cmd_size) {
>> + /*
>> + * It is time to flush all pending control commands if SVQ is full
>> + * or control commands shadow buffers are full.
>> + *
>> + * We can poll here since we've had BQL from the time
>> + * we sent the descriptor.
>> + */
>> + r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
>> + (void *)s->status);
>> + if (unlikely(r < 0)) {
>> + return r;
>> + }
>> +
>> + vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
>> + }
>> +
>
> It would be great to merge this flush with the one at
> vhost_vdpa_net_load. We would need to return ENOSPC or similar and
> handle it there.
>
> But it would make it more difficult to iterate through the loading of
> the different parameters, so I think it can be done on top.
>
Hi Eugenio,

Please forgive my poor English — do you mean that the approach in my
patch is acceptable to you?
>> /* Each CVQ command has one out descriptor and one in descriptor */
>> assert(vhost_vdpa_net_svq_available_slots(s) >= 2);
>> + assert(iov_size(out_cursor, 1) >= cmd_size);
>>
>
> Same here, I think we can avoid the assertion, right?
You are right, I will remove this assertion.
Thanks!
>
> Apart from that,
>
> Acked-by: Eugenio Pérez <eperezma@redhat.com>
>
>> /* Prepare the buffer for out descriptor for the device */
>> iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
>> @@ -681,11 +726,13 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState
>> *s,
>> return r;
>> }
>>
>> - /*
>> - * We can poll here since we've had BQL from the time
>> - * we sent the descriptor.
>> - */
>> - return vhost_vdpa_net_svq_poll(s, 1);
>> + /* iterate the cursors */
>> + dummy_cursor_iov_cnt = 1;
>> + iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
>> + dummy_cursor_iov_cnt = 1;
>> + iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt,
>> sizeof(*s->status));
>> +
>> + return 0;
>> }
>>
>> static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
>> @@ -697,15 +744,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s,
>> const VirtIONet *n,
>> .iov_base = (void *)n->mac,
>> .iov_len = sizeof(n->mac),
>> };
>> - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor,
>> in_cursor,
>> - VIRTIO_NET_CTRL_MAC,
>> -
>> VIRTIO_NET_CTRL_MAC_ADDR_SET,
>> - &data, 1);
>> - if (unlikely(dev_written < 0)) {
>> - return dev_written;
>> - }
>> - if (*s->status != VIRTIO_NET_OK) {
>> - return -EIO;
>> + ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> + VIRTIO_NET_CTRL_MAC,
>> + VIRTIO_NET_CTRL_MAC_ADDR_SET,
>> + &data, 1);
>> + if (unlikely(r < 0)) {
>> + return r;
>> }
>> }
>>
>> @@ -750,15 +794,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s,
>> const VirtIONet *n,
>> .iov_len = mul_macs_size,
>> },
>> };
>> - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> + ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> VIRTIO_NET_CTRL_MAC,
>> VIRTIO_NET_CTRL_MAC_TABLE_SET,
>> data, ARRAY_SIZE(data));
>> - if (unlikely(dev_written < 0)) {
>> - return dev_written;
>> - }
>> - if (*s->status != VIRTIO_NET_OK) {
>> - return -EIO;
>> + if (unlikely(r < 0)) {
>> + return r;
>> }
>>
>> return 0;
>> @@ -770,7 +811,7 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
>> struct iovec *in_cursor)
>> {
>> struct virtio_net_ctrl_mq mq;
>> - ssize_t dev_written;
>> + ssize_t r;
>>
>> if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
>> return 0;
>> @@ -781,15 +822,12 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
>> .iov_base = &mq,
>> .iov_len = sizeof(mq),
>> };
>> - dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> - VIRTIO_NET_CTRL_MQ,
>> - VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
>> - &data, 1);
>> - if (unlikely(dev_written < 0)) {
>> - return dev_written;
>> - }
>> - if (*s->status != VIRTIO_NET_OK) {
>> - return -EIO;
>> + r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> + VIRTIO_NET_CTRL_MQ,
>> + VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
>> + &data, 1);
>> + if (unlikely(r < 0)) {
>> + return r;
>> }
>>
>> return 0;
>> @@ -801,7 +839,7 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState
>> *s,
>> struct iovec *in_cursor)
>> {
>> uint64_t offloads;
>> - ssize_t dev_written;
>> + ssize_t r;
>>
>> if (!virtio_vdev_has_feature(&n->parent_obj,
>> VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
>> @@ -829,15 +867,12 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState
>> *s,
>> .iov_base = &offloads,
>> .iov_len = sizeof(offloads),
>> };
>> - dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> - VIRTIO_NET_CTRL_GUEST_OFFLOADS,
>> -
>> VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
>> - &data, 1);
>> - if (unlikely(dev_written < 0)) {
>> - return dev_written;
>> - }
>> - if (*s->status != VIRTIO_NET_OK) {
>> - return -EIO;
>> + r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> + VIRTIO_NET_CTRL_GUEST_OFFLOADS,
>> + VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
>> + &data, 1);
>> + if (unlikely(r < 0)) {
>> + return r;
>> }
>>
>> return 0;
>> @@ -853,16 +888,12 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState
>> *s,
>> .iov_base = &on,
>> .iov_len = sizeof(on),
>> };
>> - ssize_t dev_written;
>> + ssize_t r;
>>
>> - dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> - VIRTIO_NET_CTRL_RX,
>> - cmd, &data, 1);
>> - if (unlikely(dev_written < 0)) {
>> - return dev_written;
>> - }
>> - if (*s->status != VIRTIO_NET_OK) {
>> - return -EIO;
>> + r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> + VIRTIO_NET_CTRL_RX, cmd, &data, 1);
>> + if (unlikely(r < 0)) {
>> + return r;
>> }
>>
>> return 0;
>> @@ -1019,15 +1050,12 @@ static int
>> vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
>> .iov_base = &vid,
>> .iov_len = sizeof(vid),
>> };
>> - ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> - VIRTIO_NET_CTRL_VLAN,
>> - VIRTIO_NET_CTRL_VLAN_ADD,
>> - &data, 1);
>> - if (unlikely(dev_written < 0)) {
>> - return dev_written;
>> - }
>> - if (unlikely(*s->status != VIRTIO_NET_OK)) {
>> - return -EIO;
>> + ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
>> + VIRTIO_NET_CTRL_VLAN,
>> + VIRTIO_NET_CTRL_VLAN_ADD,
>> + &data, 1);
>> + if (unlikely(r < 0)) {
>> + return r;
>> }
>>
>> return 0;
>> @@ -1096,6 +1124,17 @@ static int vhost_vdpa_net_load(NetClientState *nc)
>> return r;
>> }
>>
>> + /*
>> + * We need to poll and check all pending device's used buffers.
>> + *
>> + * We can poll here since we've had BQL from the time
>> + * we sent the descriptor.
>> + */
>> + r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
>> + if (unlikely(r)) {
>> + return r;
>> + }
>> +
>> return 0;
>> }
>>
>> --
>> 2.25.1
>>
>