
[PATCH] vm_map: augment VM maps with a red-black tree


From: Richard Braun
Subject: [PATCH] vm_map: augment VM maps with a red-black tree
Date: Tue, 24 Apr 2012 00:52:40 +0200

Because of heavy map entry fragmentation and a poor map entry merging
implementation, the kernel can sometimes need thousands of map entries
to describe the address space of a task (most often a pager). This
change introduces a red-black tree in the VM maps with the purpose of
speeding up entry lookups.

Feedback on performance improvements seen with this patch is welcome.
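For readers who want to see the lookup scheme in isolation, here is a minimal
standalone sketch (plain C, hypothetical names, not part of the patch) of how a
three-way interval comparison like vm_map_entry_cmp_lookup(), combined with a
"nearest node on the left" search, finds the entry containing an address or,
failing that, the closest entry below it. A binary search over a sorted array
stands in for the red-black tree and rbtree_lookup_nearest().

#include <stdio.h>

/* Simplified stand-in for a map entry: a half-open address range. */
struct entry {
    unsigned long start;    /* corresponds to vme_start */
    unsigned long end;      /* corresponds to vme_end (exclusive) */
};

/* Three-way comparison mirroring vm_map_entry_cmp_lookup():
 * >0 if addr lies beyond the entry, 0 if it falls inside it,
 * <0 if it lies before it. */
static int cmp_lookup(unsigned long addr, const struct entry *e)
{
    if (addr >= e->end)
        return 1;
    if (addr >= e->start)
        return 0;
    return -1;
}

/* "Nearest to the left" lookup over a sorted array, standing in for
 * rbtree_lookup_nearest(..., RBTREE_LEFT): return the entry containing
 * addr, or the closest entry below it, or NULL if none exists. */
static const struct entry *lookup_nearest_left(const struct entry *v,
                                               int n, unsigned long addr)
{
    const struct entry *best = NULL;
    int lo = 0, hi = n - 1;

    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        int diff = cmp_lookup(addr, &v[mid]);

        if (diff == 0)
            return &v[mid];     /* addr is contained in this entry */
        if (diff > 0) {
            best = &v[mid];     /* candidate entry to the left of addr */
            lo = mid + 1;
        } else {
            hi = mid - 1;
        }
    }
    return best;
}

int main(void)
{
    /* Entries sorted by address, with gaps between them. */
    const struct entry map[] = {
        { 0x1000, 0x2000 }, { 0x3000, 0x5000 }, { 0x8000, 0x9000 }
    };
    unsigned long addr = 0x6000;    /* falls in a gap */
    const struct entry *e = lookup_nearest_left(map, 3, addr);

    if (e != NULL)
        printf("nearest entry: [0x%lx, 0x%lx)\n", e->start, e->end);
    return 0;
}

In the patch itself, a NULL result becomes *entry = vm_map_to_entry(map) with
FALSE, and a non-NULL result yields TRUE only when the address is below the
entry's vme_end, i.e. actually contained in it.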
---
 version.m4  |    2 +-
 vm/vm_map.c |  112 ++++++++++++++++++++++++++++++++--------------------------
 vm/vm_map.h |   11 ++++--
 3 files changed, 71 insertions(+), 54 deletions(-)

diff --git a/version.m4 b/version.m4
index 3bf4275..4185e45 100644
--- a/version.m4
+++ b/version.m4
@@ -1,4 +1,4 @@
 m4_define([AC_PACKAGE_NAME],[GNU Mach])
-m4_define([AC_PACKAGE_VERSION],[1.3.99])
+m4_define([AC_PACKAGE_VERSION],[1.3.99_redblackify_vmmap])
 m4_define([AC_PACKAGE_BUGREPORT],[bug-hurd@gnu.org])
 m4_define([AC_PACKAGE_TARNAME],[gnumach])
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 8012bcf..487f8bb 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -42,6 +42,7 @@
 #include <kern/assert.h>
 #include <kern/debug.h>
 #include <kern/kalloc.h>
+#include <kern/rbtree.h>
 #include <kern/slab.h>
 #include <vm/pmap.h>
 #include <vm/vm_fault.h>
@@ -213,6 +214,7 @@ void vm_map_setup(map, pmap, min, max, pageable)
        vm_map_last_entry(map)  = vm_map_to_entry(map);
        map->hdr.nentries = 0;
        map->hdr.entries_pageable = pageable;
+       rbtree_init(&map->hdr.tree);
 
        map->size = 0;
        map->ref_count = 1;
@@ -307,9 +309,40 @@ void _vm_map_entry_dispose(map_header, entry)
 }
 
 /*
+ *     Red-black tree lookup/insert comparison functions
+ */
+static inline int vm_map_entry_cmp_lookup(vm_offset_t addr,
+                                          const struct rbtree_node *node)
+{
+    struct vm_map_entry *entry;
+
+    entry = rbtree_entry(node, struct vm_map_entry, tree_node);
+
+    if (addr >= entry->vme_end)
+        return 1;
+
+    if (addr >= entry->vme_start)
+        return 0;
+
+    return -1;
+}
+
+static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
+                                          const struct rbtree_node *b)
+{
+    struct vm_map_entry *entry;
+
+    entry = rbtree_entry(a, struct vm_map_entry, tree_node);
+    return vm_map_entry_cmp_lookup(entry->vme_start, b);
+}
+
+/*
  *     vm_map_entry_{un,}link:
  *
  *     Insert/remove entries from maps (or map copies).
+ *
+ *     The start and end addresses of the entries must be properly set
+ *     before using these macros.
  */
 #define vm_map_entry_link(map, after_where, entry)     \
        _vm_map_entry_link(&(map)->hdr, after_where, entry)
@@ -324,6 +357,8 @@ void _vm_map_entry_dispose(map_header, entry)
        (entry)->vme_next = (after_where)->vme_next;    \
        (entry)->vme_prev->vme_next =                   \
         (entry)->vme_next->vme_prev = (entry);         \
+       rbtree_insert(&(hdr)->tree, &(entry)->tree_node,        \
+                     vm_map_entry_cmp_insert);         \
        MACRO_END
 
 #define vm_map_entry_unlink(map, entry)                        \
@@ -337,6 +372,7 @@ void _vm_map_entry_dispose(map_header, entry)
        (hdr)->nentries--;                              \
        (entry)->vme_next->vme_prev = (entry)->vme_prev; \
        (entry)->vme_prev->vme_next = (entry)->vme_next; \
+       rbtree_remove(&(hdr)->tree, &(entry)->tree_node);       \
        MACRO_END
 
 /*
@@ -413,70 +449,41 @@ boolean_t vm_map_lookup_entry(map, address, entry)
        register vm_offset_t    address;
        vm_map_entry_t          *entry;         /* OUT */
 {
-       register vm_map_entry_t         cur;
-       register vm_map_entry_t         last;
+       register struct rbtree_node     *node;
+       register vm_map_entry_t         hint;
 
        /*
-        *      Start looking either from the head of the
-        *      list, or from the hint.
+        *      First, make a quick check to see if we are already
+        *      looking at the entry we want (which is often the case).
         */
 
        simple_lock(&map->hint_lock);
-       cur = map->hint;
+       hint = map->hint;
        simple_unlock(&map->hint_lock);
 
-       if (cur == vm_map_to_entry(map))
-               cur = cur->vme_next;
-
-       if (address >= cur->vme_start) {
-               /*
-                *      Go from hint to end of list.
-                *
-                *      But first, make a quick check to see if
-                *      we are already looking at the entry we
-                *      want (which is usually the case).
-                *      Note also that we don't need to save the hint
-                *      here... it is the same hint (unless we are
-                *      at the header, in which case the hint didn't
-                *      buy us anything anyway).
-                */
-               last = vm_map_to_entry(map);
-               if ((cur != last) && (cur->vme_end > address)) {
-                       *entry = cur;
-                       return(TRUE);
-               }
-       }
-       else {
-               /*
-                *      Go from start to hint, *inclusively*
-                */
-               last = cur->vme_next;
-               cur = vm_map_first_entry(map);
+       if (hint != vm_map_to_entry(map)
+           && (hint->vme_start <= address)
+           && (address < hint->vme_end)) {
+               *entry = hint;
+               return(TRUE);
        }
 
        /*
-        *      Search linearly
+        *      If the hint didn't help, use the red-black tree.
         */
 
-       while (cur != last) {
-               if (cur->vme_end > address) {
-                       if (address >= cur->vme_start) {
-                               /*
-                                *      Save this lookup for future
-                                *      hints, and return
-                                */
+       node = rbtree_lookup_nearest(&map->hdr.tree, address,
+                                    vm_map_entry_cmp_lookup, RBTREE_LEFT);
 
-                               *entry = cur;
-                               SAVE_HINT(map, cur);
-                               return(TRUE);
-                       }
-                       break;
-               }
-               cur = cur->vme_next;
+       if (node == NULL) {
+               *entry = vm_map_to_entry(map);
+               SAVE_HINT(map, *entry);
+               return(FALSE);
+       } else {
+               *entry = rbtree_entry(node, struct vm_map_entry, tree_node);
+               SAVE_HINT(map, *entry);
+               return((address < (*entry)->vme_end) ? TRUE : FALSE);
        }
-       *entry = cur->vme_prev;
-       SAVE_HINT(map, *entry);
-       return(FALSE);
 }
 
 /*
@@ -2414,6 +2421,10 @@ start_pass_1:
  */
 #define        vm_map_copy_insert(map, where, copy)                            \
        MACRO_BEGIN                                                     \
+       struct rbtree_node *node, *tmp;                                 \
+       rbtree_for_each_remove(&(copy)->cpy_hdr.tree, node, tmp)        \
+               rbtree_insert(&(map)->hdr.tree, node,                   \
+                             vm_map_entry_cmp_insert);                 \
        (((where)->vme_next)->vme_prev = vm_map_copy_last_entry(copy))  \
                ->vme_next = ((where)->vme_next);                       \
        ((where)->vme_next = vm_map_copy_first_entry(copy))             \
@@ -3138,6 +3149,7 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
        copy->type = VM_MAP_COPY_ENTRY_LIST;
        copy->cpy_hdr.nentries = 0;
        copy->cpy_hdr.entries_pageable = TRUE;
+       rbtree_init(&copy->cpy_hdr.tree);
 
        copy->offset = src_addr;
        copy->size = len;
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 381c7cf..d5bcf96 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -51,6 +51,7 @@
 #include <vm/vm_page.h>
 #include <vm/vm_types.h>
 #include <kern/lock.h>
+#include <kern/rbtree.h>
 #include <kern/macro_help.h>
 
 #define KENTRY_DATA_SIZE (32*PAGE_SIZE)
@@ -102,6 +103,7 @@ struct vm_map_entry {
 #define vme_next               links.next
 #define vme_start              links.start
 #define vme_end                        links.end
+       struct rbtree_node      tree_node;      /* links to other entries in tree */
        union vm_map_object     object;         /* object I point to */
        vm_offset_t             offset;         /* offset into object */
        unsigned int
@@ -137,6 +139,7 @@ typedef struct vm_map_entry *vm_map_entry_t;
  */
 struct vm_map_header {
        struct vm_map_links     links;          /* first, last, min, max */
+       struct rbtree           tree;           /* Sorted tree of entries */
        int                     nentries;       /* Number of entries */
        boolean_t               entries_pageable;
                                                /* are map entries pageable? */
@@ -152,9 +155,11 @@ struct vm_map_header {
  *
  *     Implementation:
  *             Maps are doubly-linked lists of map entries, sorted
- *             by address.  One hint is used to start
- *             searches again from the last successful search,
- *             insertion, or removal.  Another hint is used to
+ *             by address.  They're also contained in a red-black tree.
+ *             One hint is used to start searches again at the last
+ *             successful search, insertion, or removal.  If the hint
+ *             lookup failed (i.e. the hint didn't refer to the requested
+ *             entry), a BST lookup is performed.  Another hint is used to
  *             quickly find free space.
  */
 struct vm_map {
-- 
1.7.9.1



