qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 3/3] i386/tdx: handle TDG.VP.VMCALL<GetQuote>


From: Xiaoyao Li
Subject: Re: [PATCH 3/3] i386/tdx: handle TDG.VP.VMCALL<GetQuote>
Date: Fri, 20 Jun 2025 14:47:46 +0800
User-agent: Mozilla Thunderbird

On 6/20/2025 4:33 AM, Paolo Bonzini wrote:

...

+static void tdx_generate_quote_cleanup(TdxGenerateQuoteTask *task)
+{
+    timer_del(&task->timer);
+
+    g_source_remove(task->watch);

It needs to be

        if (task->watch) {
                g_source_remove(task->watch);
        }

for the case where QGS is not available, so task->watch is never initialized; otherwise we get

  qemu-system-x86_64: GLib: g_source_remove: assertion 'tag > 0' failed

+    qio_channel_close(QIO_CHANNEL(task->sioc), NULL);
+    object_unref(OBJECT(task->sioc));
+
+    task->completion(task);
+}
+
+static gboolean tdx_get_quote_read(QIOChannel *ioc, GIOCondition condition,
+                                   gpointer opaque)
+{
...> + if (task->receive_buf_received >= (sizeof(qgs_msg_header_t) + HEADER_SIZE)) {
+        qgs_msg_header_t *hdr = (qgs_msg_header_t *)(task->receive_buf + 
HEADER_SIZE);
+        if (hdr->major_version != QGS_MSG_LIB_MAJOR_VER ||
+            hdr->minor_version != QGS_MSG_LIB_MINOR_VER) {

This check makes it fail with old QGS, which defines

QGS_MSG_LIB_MINOR_VER as 0,

so what QEMU gets is 1.0 instead of 1.1.

It is really a QGS bug that, when it changed QGS_MSG_LIB_MINOR_VER from 0 to 1, it didn't consider the compatibility issue.

However, the old QGS is out there. Should we relax the check here to accommodate it, or just let it fail with old QGS?

+}
+

...

+static void tdx_quote_generator_connected(QIOTask *qio_task, gpointer opaque)
+{
+    TdxGenerateQuoteTask *task = opaque;
+    Error *err = NULL;
+    int ret;
+
+    ret = qio_task_propagate_error(qio_task, &err);
+    if (ret) {
+        error_report_err(err);
+        task->status_code = TDX_VP_GET_QUOTE_QGS_UNAVAILABLE;

Do we need to provide more specific information here? What we got with a wrong QGS socket address can be, e.g.,

qemu-system-x86_64: Failed to connect to '2:4051': Connection reset by peer

Which doesn't look directly related to QGS of TDX.

+        tdx_generate_quote_cleanup(task);
+        return;
+    }
+
+    task->watch = qio_channel_add_watch(QIO_CHANNEL(task->sioc), G_IO_OUT,
+                                        tdx_send_report, task, NULL);
+}
+

...

+void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run)

My previous version defined the return type as int, because it wanted to stop QEMU when it hit a failure of address_space_read/write. However, this patch returns TDG_VP_VMCALL_INVALID_OPERAND to the TD guest for such cases.

Shouldn't the failure of address_space_read/write be treated as QEMU internal error?

+{
+    TdxGenerateQuoteTask *task;
+    struct tdx_get_quote_header hdr;
+    hwaddr buf_gpa = run->tdx.get_quote.gpa;
+    uint64_t buf_len = run->tdx.get_quote.size;
+
+    QEMU_BUILD_BUG_ON(sizeof(struct tdx_get_quote_header) != 
TDX_GET_QUOTE_HDR_SIZE);
+
+    run->tdx.get_quote.ret = TDG_VP_VMCALL_INVALID_OPERAND;
+
+    if (buf_len == 0) {
+        return;
+    }
+
+    if (!QEMU_IS_ALIGNED(buf_gpa, 4096) || !QEMU_IS_ALIGNED(buf_len, 4096)) {
+        run->tdx.get_quote.ret = TDG_VP_VMCALL_ALIGN_ERROR;
+        return;
+    }
+
+    if (address_space_read(&address_space_memory, buf_gpa, 
MEMTXATTRS_UNSPECIFIED,
+                           &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+        error_report("TDX: get-quote: failed to read GetQuote header.");
+        return;
+    }
+
+    if (le64_to_cpu(hdr.structure_version) != TDX_GET_QUOTE_STRUCTURE_VERSION) 
{
+        return;
+    }
+
+    /* Only safe-guard check to avoid too large buffer size. */
+    if (buf_len > TDX_GET_QUOTE_MAX_BUF_LEN ||
+        le32_to_cpu(hdr.in_len) > buf_len - TDX_GET_QUOTE_HDR_SIZE) {
+        return;
+    }
+
+    if (!tdx_guest->qg_sock_addr) {
+        hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_QGS_UNAVAILABLE);
+        if (address_space_write(&address_space_memory, buf_gpa,
+                                MEMTXATTRS_UNSPECIFIED,
+                                &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+            error_report("TDX: failed to update GetQuote header.");
+            return;
+        }
+        run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS;
+        return;
+    }
+
+    qemu_mutex_lock(&tdx_guest->lock);
+    if (tdx_guest->num >= TDX_MAX_GET_QUOTE_REQUEST) {
+        qemu_mutex_unlock(&tdx_guest->lock);
+        run->tdx.get_quote.ret = TDG_VP_VMCALL_RETRY;
+        return;
+    }
+    tdx_guest->num++;
+    qemu_mutex_unlock(&tdx_guest->lock);
+
+    task = g_new(TdxGenerateQuoteTask, 1);
+    task->buf_gpa = buf_gpa;
+    task->payload_gpa = buf_gpa + TDX_GET_QUOTE_HDR_SIZE;
+    task->payload_len = buf_len - TDX_GET_QUOTE_HDR_SIZE;
+    task->hdr = hdr;
+    task->completion = tdx_get_quote_completion;
+
+    task->send_data_size = le32_to_cpu(hdr.in_len);
+    task->send_data = g_malloc(task->send_data_size);
+    task->send_data_sent = 0;
+
+    if (address_space_read(&address_space_memory, task->payload_gpa,
+                           MEMTXATTRS_UNSPECIFIED, task->send_data,
+                           task->send_data_size) != MEMTX_OK) {
+        goto out_free;
+    }
+
+    /* Mark the buffer in-flight. */
+    hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_IN_FLIGHT);
+    if (address_space_write(&address_space_memory, buf_gpa,
+                            MEMTXATTRS_UNSPECIFIED,
+                            &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+        goto out_free;
+    }
+
+    task->receive_buf = g_malloc0(task->payload_len);
+    task->receive_buf_received = 0;
+    task->opaque = tdx_guest;
+
+    object_ref(tdx_guest);
+    tdx_generate_quote(task, tdx_guest->qg_sock_addr);
+    run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS;
+    return;
+
+out_free:
+    g_free(task->send_data);
+    g_free(task);
+}
+




reply via email to

[Prev in Thread] Current Thread [Next in Thread]