[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 3/3] kvm: Support for querying fd-based stats
From: Mark Kanda
Subject: [PATCH v3 3/3] kvm: Support for querying fd-based stats
Date: Mon, 31 Jan 2022 13:43:12 -0600
Add support for querying fd-based KVM stats - as introduced by Linux kernel
commit:
cb082bfab59a ("KVM: stats: Add fd-based API to read binary stats data")
Signed-off-by: Mark Kanda <mark.kanda@oracle.com>
---
accel/kvm/kvm-all.c | 308 ++++++++++++++++++++++++++++++++++++++++++++
qapi/misc.json | 2 +-
2 files changed, 309 insertions(+), 1 deletion(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 0e66ebb497..e43e1f1c1c 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -47,6 +47,8 @@
#include "kvm-cpus.h"
#include "hw/boards.h"
+#include "qapi/qapi-commands-misc.h"
+#include "monitor/stats.h"
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
@@ -2309,6 +2311,9 @@ bool kvm_dirty_ring_enabled(void)
return kvm_state->kvm_dirty_ring_size ? true : false;
}
+static void query_stats_cb(StatsResults *, StatsFilter *, Error **);
+static void query_stats_schemas_cb(StatsSchemaResult *, Error **);
+
static int kvm_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@@ -2637,6 +2642,11 @@ static int kvm_init(MachineState *ms)
}
}
+ if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
+ add_stats_callbacks(STATS_PROVIDER_KVM, &query_stats_cb,
+ &query_stats_schemas_cb);
+ }
+
return 0;
err:
@@ -3696,3 +3706,301 @@ static void kvm_type_init(void)
}
type_init(kvm_type_init);
+
+/*
+ * Arguments threaded from the stats callbacks down through query_stats()
+ * and the run_on_cpu() vCPU helper.  One instance is shared across all
+ * targets of a single query.
+ */
+typedef struct StatsArgs {
+    /* Selects the target (VM/vCPU) and any name/vCPU filtering. */
+    StatsFilter *filter;
+    /* true: populate result.schema; false: populate result.stats. */
+    bool is_schema;
+    union {
+        StatsResults *stats;
+        StatsSchemaResult *schema;
+    } result;
+    /* Destination for any error raised while reading the stats fd. */
+    Error **errp;
+} StatsArgs;
+
+static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
+ uint64_t *stats_data,
+ StatsList *stats_list,
+ Error **errp)
+{
+ StatsList *stats_entry = g_malloc0(sizeof(*stats_entry));
+ Stats *stats = g_malloc0(sizeof(*stats));
+ uint64List *val_list = NULL;
+ int i;
+
+ stats->name = g_strdup(pdesc->name);
+ stats->value = g_malloc0(sizeof(*stats->value));
+
+ /* Alloc and populate data list */
+ if (pdesc->size == 1) {
+ stats->value->u.scalar = *stats_data;
+ stats->value->type = QTYPE_QNUM;
+ } else {
+ for (i = 0; i < pdesc->size; i++) {
+ uint64List *val_entry = g_malloc0(sizeof(*val_entry));
+ val_entry->value = stats_data[i];
+ val_entry->next = val_list;
+ val_list = val_entry;
+ }
+ stats->value->u.list.list = val_list;
+ stats->value->type = QTYPE_QDICT;
+ }
+
+ stats_entry->value = stats;
+ stats_entry->next = stats_list;
+
+ return stats_entry;
+}
+
+static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
+ StatsSchemaValueList *list,
+ Error **errp)
+{
+ StatsSchemaValueList *schema_entry;
+
+ schema_entry = g_malloc0(sizeof(*schema_entry));
+ schema_entry->value = g_malloc0(sizeof(*schema_entry->value));
+ schema_entry->value->name = g_strdup(pdesc->name);
+
+ switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
+ case KVM_STATS_TYPE_CUMULATIVE:
+ schema_entry->value->type = STAT_TYPE_CUMULATIVE;
+ break;
+ case KVM_STATS_TYPE_INSTANT:
+ schema_entry->value->type = STAT_TYPE_INSTANT;
+ break;
+ case KVM_STATS_TYPE_PEAK:
+ schema_entry->value->type = STAT_TYPE_PEAK;
+ break;
+ case KVM_STATS_TYPE_LINEAR_HIST:
+ schema_entry->value->type = STAT_TYPE_LINEAR_HIST;
+ break;
+ case KVM_STATS_TYPE_LOG_HIST:
+ schema_entry->value->type = STAT_TYPE_LOG_HIST;
+ break;
+ default:
+ schema_entry->value->type = STAT_TYPE_UNKNOWN;
+ }
+
+ switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
+ case KVM_STATS_UNIT_NONE:
+ schema_entry->value->unit = STAT_UNIT_NONE;
+ break;
+ case KVM_STATS_UNIT_BYTES:
+ schema_entry->value->unit = STAT_UNIT_BYTES;
+ break;
+ case KVM_STATS_UNIT_CYCLES:
+ schema_entry->value->unit = STAT_UNIT_CYCLES;
+ break;
+ case KVM_STATS_UNIT_SECONDS:
+ schema_entry->value->unit = STAT_UNIT_SECONDS;
+ break;
+ default:
+ schema_entry->value->unit = STAT_UNIT_UNKNOWN;
+ }
+
+ switch (pdesc->flags & KVM_STATS_BASE_MASK) {
+ case KVM_STATS_BASE_POW10:
+ schema_entry->value->base = STAT_BASE_POW10;
+ break;
+ case KVM_STATS_BASE_POW2:
+ schema_entry->value->base = STAT_BASE_POW2;
+ break;
+ default:
+ schema_entry->value->base = STAT_BASE_UNKNOWN;
+ }
+
+ schema_entry->value->exponent = pdesc->exponent;
+ schema_entry->next = list;
+ return schema_entry;
+}
+
+static void query_stats(StatsArgs *kvm_stat_args, int stats_fd)
+{
+ size_t size_desc, size_data;
+ struct kvm_stats_header *header;
+ struct kvm_stats_desc *stats_desc = NULL;
+ Error *local_err = NULL;
+ void *stats_list = NULL;
+ char *id = NULL;
+ ssize_t ret;
+ int i;
+
+ /* Read KVM stats header */
+ header = g_malloc(sizeof(*header));
+ ret = read(stats_fd, header, sizeof(*header));
+ if (ret != sizeof(*header)) {
+ error_setg(&local_err, "KVM stats: failed to read stats header: "
+ "expected %zu actual %zu", sizeof(*header), ret);
+ goto exit;
+ }
+ size_desc = sizeof(*stats_desc) + header->name_size;
+
+ /* Read KVM stats id string */
+ id = g_malloc(header->name_size);
+ ret = read(stats_fd, id, header->name_size);
+ if (ret != header->name_size) {
+ error_setg(&local_err, "KVM stats: failed to read id string: "
+ "expected %zu actual %zu", (size_t) header->name_size, ret);
+ goto exit;
+ }
+
+ /* Read KVM stats descriptors */
+ stats_desc = g_malloc0(header->num_desc * size_desc);
+ ret = pread(stats_fd, stats_desc,
+ size_desc * header->num_desc, header->desc_offset);
+
+ if (ret != size_desc * header->num_desc) {
+ error_setg(&local_err, "KVM stats: failed to read stats descriptors: "
+ "expected %zu actual %zu",
+ size_desc * header->num_desc, ret);
+ goto exit;
+ }
+
+ for (i = 0; i < header->num_desc; ++i) {
+ struct kvm_stats_desc *pdesc = (void *)stats_desc + i * size_desc;
+ size_data = pdesc->size * sizeof(uint64_t);
+
+ uint64_t *stats_data = g_malloc(size_data);
+
+ ret = pread(stats_fd, stats_data, size_data,
+ header->data_offset + pdesc->offset);
+
+ if (ret != pdesc->size * sizeof(*stats_data)) {
+ error_setg(&local_err, "KVM stats: failed to read data: "
+ "expected %zu actual %zu",
+ pdesc->size * sizeof(*stats_data), ret);
+ g_free(stats_data);
+ goto exit;
+ }
+
+ /* Add entry to the list */
+ if (kvm_stat_args->is_schema) {
+ stats_list = add_kvmschema_entry(pdesc, (StatsSchemaValueList *)
+ stats_list, &local_err);
+ } else {
+ if (stat_name_filter(kvm_stat_args->filter,
+ kvm_stat_args->filter->target, pdesc->name)) {
+ g_free(stats_data);
+ continue;
+ }
+ stats_list = add_kvmstat_entry(pdesc, stats_data,
+ (StatsList *) stats_list,
+ &local_err);
+ }
+ g_free(stats_data);
+ }
+
+ if (!stats_list) {
+ goto exit;
+ }
+
+ if (kvm_stat_args->is_schema) {
+ StatsSchemaProvider *provider;
+ if (kvm_stat_args->filter->target == STATS_TARGET_VM) {
+ provider = add_vm_stats_schema(kvm_stat_args->result.schema,
+ STATS_PROVIDER_KVM);
+ } else if (kvm_stat_args->filter->target == STATS_TARGET_VCPU) {
+ provider = add_vcpu_stats_schema(kvm_stat_args->result.schema,
+ STATS_PROVIDER_KVM);
+ }
+ provider->stats = (StatsSchemaValueList *)stats_list;
+ } else {
+ StatsResultsEntry *results;
+ if (kvm_stat_args->filter->target == STATS_TARGET_VM) {
+ results = add_vm_stats_entry(kvm_stat_args->result.stats,
+ STATS_PROVIDER_KVM);
+ } else if (kvm_stat_args->filter->target == STATS_TARGET_VCPU) {
+ results =
+ add_vcpu_stats_entry(kvm_stat_args->result.stats,
+ STATS_PROVIDER_KVM,
+ current_cpu->parent_obj.canonical_path);
+ }
+ results->stats = (StatsList *)stats_list;
+ }
+exit:
+ error_propagate(kvm_stat_args->errp, local_err);
+ g_free(stats_desc);
+ g_free(id);
+ g_free(header);
+}
+
+static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
+{
+ StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
+ int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
+ Error *local_err = NULL;
+
+ if (stats_fd == -1) {
+ error_setg(&local_err, "KVM stats: ioctl failed");
+ error_propagate(kvm_stats_args->errp, local_err);
+ return;
+ }
+ query_stats(kvm_stats_args, stats_fd);
+ close(stats_fd);
+}
+
+static void query_stats_cb(StatsResults *stats_result, StatsFilter *filter,
+ Error **errp)
+{
+ KVMState *s = kvm_state;
+ CPUState *cpu;
+ int stats_fd;
+ StatsArgs *stats_args = g_malloc0(sizeof(*stats_args));
+ stats_args->filter = filter;
+ stats_args->errp = errp;
+ stats_args->result.stats = stats_result;
+
+ switch (filter->target) {
+ case STATS_TARGET_VM:
+ stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
+ if (stats_fd == -1) {
+ error_setg(errp, "KVM stats: ioctl failed");
+ goto done;
+ }
+
+ query_stats(stats_args, stats_fd);
+ break;
+ case STATS_TARGET_VCPU:
+ CPU_FOREACH(cpu) {
+ if (stat_cpu_filter(filter, cpu->parent_obj.canonical_path)) {
+ continue;
+ }
+ run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(stats_args));
+ }
+ break;
+ default:
+ break;
+ }
+done:
+ g_free(stats_args);
+}
+
+void query_stats_schemas_cb(StatsSchemaResult *stats_result, Error **errp)
+{
+ KVMState *s = kvm_state;
+ int stats_fd;
+ StatsArgs *stats_args = g_malloc0(sizeof(*stats_args));
+ stats_args->filter = g_malloc0(sizeof(*stats_args->filter));
+ stats_args->errp = errp;
+
+ stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
+ if (stats_fd == -1) {
+ error_setg(errp, "KVM stats: ioctl failed");
+ goto done;
+ }
+
+ stats_args->result.schema = stats_result;
+ stats_args->is_schema = true;
+
+ /* Query VM */
+ stats_args->filter->target = STATS_TARGET_VM;
+ query_stats(stats_args, stats_fd);
+
+ /* Query vCPU */
+ stats_args->filter->target = STATS_TARGET_VCPU;
+ run_on_cpu(first_cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(stats_args));
+done:
+ g_free(stats_args->filter);
+ g_free(stats_args);
+}
+
diff --git a/qapi/misc.json b/qapi/misc.json
index 8d326464f0..39a7d172cb 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -574,7 +574,7 @@
# Since: 7.0
##
{ 'enum': 'StatsProvider',
- 'data': [ ] }
+ 'data': [ 'kvm' ] }
##
# @StatsTarget:
--
2.27.0