qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] vhost-user: broken mem regions


From: Damjan Marion (damarion)
Subject: Re: [Qemu-devel] vhost-user: broken mem regions
Date: Wed, 25 Jun 2014 21:37:58 +0000

On 25 Jun 2014, at 20:18, Michael S. Tsirkin <address@hidden> wrote:

> On Wed, Jun 25, 2014 at 09:13:36PM +0300, Nikolay Nikolaev wrote:
>> On Wed, Jun 25, 2014 at 9:07 PM, Michael S. Tsirkin <address@hidden> wrote:
>>> On Wed, Jun 25, 2014 at 07:56:46PM +0300, Nikolay Nikolaev wrote:
>>>> On Wed, Jun 25, 2014 at 7:44 PM, Paolo Bonzini <address@hidden> wrote:
>>>>>> nregions: 4
>>>>>> region:
>>>>>>      gpa = 0x100000000
>>>>>>      size = 3221225472
>>>>>>      ua = 0x2aab6ac00000
>>>>> 
>>>>> High memory, above 3 gigabytes.
>>>>> 
>>>>>> region:
>>>>>>      gpa = 0xFFFC0000
>>>>>>      size = 262144
>>>>>>      ua = 0x7fc13d200000
>>>>> 
>>>>> This is the BIOS.  There shouldn't be any FD for this one, it
>>>>> is not allocated in hugetlbfs.
>>>>> 
>>>>>> region:
>>>>>>      gpa = 0x0
>>>>>>      size = 655360
>>>>>>      ua = 0x2aaaaac00000
>>>>>> region:
>>>>>>      gpa = 0xC0000
>>>>>>      size = 3220439040
>>>>>>      ua = 0x2aaaaacc0000
>>>>> 
>>>>> Together, it's the first 3 GB of memory.
>>>>> 
>>>>> I understand now what you mean.  Yeah, the format should be changed
>>>>> to include the offset (why does vhost-user need the ua at all?
>>>> The vring addresses are QEMU UA addresses. Of course vhost-user can
>>>> translate them to guest physical before sending the message.
>>> 
>>> It seems useful to have them as QEMU UA, this will allow
>>> frontends other than virtio where QEMU operates the
>>> ring.
>> 
>> OK - so there will be a new offset field. That's fine with me. What
>> would be the deadline for such change?
>> It's not exactly bugfix. On the other hand there's no wide adoption of
>> the protocol so it's still not critical to change it.
> 
> Right. So you have to do this before the hard freeze.
> And don't cut it too fine either :)

OK, I did the change. Pasting the code here for review, to keep the thread.

diff --git a/exec.c b/exec.c
index c849405..a94c583 100644
--- a/exec.c
+++ b/exec.c
@@ -1456,6 +1456,13 @@ int qemu_get_ram_fd(ram_addr_t addr)
     return block->fd;
 }

+void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
+{
+    RAMBlock *block = qemu_get_ram_block(addr);
+
+    return block->host;
+}
+
 /* Return a host pointer to ram allocated with qemu_ram_alloc.
    With the exception of the softmmu code in this file, this should
    only be used for local memory (e.g. video ram) that the device owns,
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 0df6a93..0cef2d3 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -14,6 +14,7 @@
 #include "sysemu/kvm.h"
 #include "qemu/error-report.h"
 #include "qemu/sockets.h"
+#include "exec/ram_addr.h"

 #include <fcntl.h>
 #include <unistd.h>
@@ -47,6 +48,7 @@ typedef struct VhostUserMemoryRegion {
     uint64_t guest_phys_addr;
     uint64_t memory_size;
     uint64_t userspace_addr;
+    uint64_t shm_offset;
 } VhostUserMemoryRegion;

 typedef struct VhostUserMemory {
@@ -183,10 +185,10 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
 {
     VhostUserMsg msg;
     VhostUserRequest msg_request;
-    RAMBlock *block = 0;
     struct vhost_vring_file *file = 0;
     int need_reply = 0;
     int fds[VHOST_MEMORY_MAX_NREGIONS];
+    int i, fd;
     size_t fd_num = 0;

     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
@@ -212,14 +214,16 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
         break;

     case VHOST_SET_MEM_TABLE:
-        QTAILQ_FOREACH(block, &ram_list.blocks, next)
-        {
-            if (block->fd > 0) {
-                msg.memory.regions[fd_num].userspace_addr =
-                    (uintptr_t) block->host;
-                msg.memory.regions[fd_num].memory_size = block->length;
-                msg.memory.regions[fd_num].guest_phys_addr = block->offset;
-                fds[fd_num++] = block->fd;
+        for (i = 0; i < dev->mem->nregions; ++i) {
+            struct vhost_memory_region *reg = dev->mem->regions + i;
+            fd = qemu_get_ram_fd(reg->guest_phys_addr);
+            if (fd > 0) {
+                msg.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
+                msg.memory.regions[fd_num].memory_size  = reg->memory_size;
+                msg.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
+                msg.memory.regions[fd_num].shm_offset = reg->userspace_addr -
+                    (ram_addr_t) qemu_get_ram_block_host_ptr(reg->guest_phys_addr);
+                fds[fd_num++] = fd;
             }
         }

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 55ca676..e9eb831 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -29,6 +29,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                    MemoryRegion *mr);
 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
 int qemu_get_ram_fd(ram_addr_t addr);
+void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
 void *qemu_get_ram_ptr(ram_addr_t addr);
 void qemu_ram_free(ram_addr_t addr);
 void qemu_ram_free_from_ptr(ram_addr_t addr);






reply via email to

[Prev in Thread] Current Thread [Next in Thread]