[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 2/2] Align kmem_cache objects using __cacheline_aligned
From: Justus Winter
Subject: [PATCH 2/2] Align kmem_cache objects using __cacheline_aligned
Date: Tue, 7 Jan 2014 15:51:28 +0100
* device/dev_lookup.c: Align kmem_cache objects using __cacheline_aligned.
* device/dev_pager.c: Likewise.
* device/ds_routines.c: Likewise.
* device/net_io.c: Likewise.
* i386/i386/fpu.c: Likewise.
* i386/i386/machine_task.c: Likewise.
* i386/i386/pcb.c: Likewise.
* i386/intel/pmap.c: Likewise.
* ipc/ipc_entry.c: Likewise.
* ipc/ipc_marequest.c: Likewise.
* ipc/ipc_object.c: Likewise.
* ipc/ipc_space.c: Likewise.
* kern/act.c: Likewise.
* kern/processor.c: Likewise.
* kern/slab.c: Likewise.
* kern/task.c: Likewise.
* kern/thread.c: Likewise.
* vm/memory_object_proxy.c: Likewise.
* vm/vm_external.c: Likewise.
* vm/vm_fault.c: Likewise.
* vm/vm_map.c: Likewise.
* vm/vm_object.c: Likewise.
* vm/vm_resident.c: Likewise.
---
device/dev_lookup.c | 3 ++-
device/dev_pager.c | 5 +++--
device/ds_routines.c | 3 ++-
device/net_io.c | 5 +++--
i386/i386/fpu.c | 3 ++-
i386/i386/machine_task.c | 3 ++-
i386/i386/pcb.c | 3 ++-
i386/intel/pmap.c | 5 +++--
ipc/ipc_entry.c | 3 ++-
ipc/ipc_marequest.c | 3 ++-
ipc/ipc_object.c | 3 ++-
ipc/ipc_space.c | 3 ++-
kern/act.c | 3 ++-
kern/processor.c | 3 ++-
kern/slab.c | 7 ++++---
kern/task.c | 3 ++-
kern/thread.c | 3 ++-
vm/memory_object_proxy.c | 3 ++-
vm/vm_external.c | 7 ++++---
vm/vm_fault.c | 3 ++-
vm/vm_map.c | 9 +++++----
vm/vm_object.c | 3 ++-
vm/vm_resident.c | 3 ++-
23 files changed, 56 insertions(+), 33 deletions(-)
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index d371b60..9485c69 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -28,6 +28,7 @@
* Date: 3/89
*/
+#include <cache.h>
#include <mach/port.h>
#include <mach/vm_param.h>
@@ -63,7 +64,7 @@ queue_head_t dev_number_hash_table[NDEVHASH];
decl_simple_lock_data(,
dev_number_lock)
-struct kmem_cache dev_hdr_cache;
+struct kmem_cache dev_hdr_cache __cacheline_aligned;
/*
* Enter device in the number lookup table.
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 3fe47c7..bc764f1 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -30,6 +30,7 @@
* Device pager.
*/
+#include <cache.h>
#include <string.h>
#include <mach/boolean.h>
@@ -124,7 +125,7 @@ typedef struct dev_pager *dev_pager_t;
#define DEV_PAGER_NULL ((dev_pager_t)0)
-struct kmem_cache dev_pager_cache;
+struct kmem_cache dev_pager_cache __cacheline_aligned;
void dev_pager_reference(dev_pager_t ds)
{
@@ -159,7 +160,7 @@ struct dev_pager_entry {
typedef struct dev_pager_entry *dev_pager_entry_t;
queue_head_t dev_pager_hashtable[DEV_PAGER_HASH_COUNT];
-struct kmem_cache dev_pager_hash_cache;
+struct kmem_cache dev_pager_hash_cache __cacheline_aligned;
decl_simple_lock_data(,
dev_pager_hash_lock)
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 82b5252..9505925 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -53,6 +53,7 @@
* Author: Shantanu Goel, University of Utah CSL
*/
+#include <cache.h>
#include <kern/printf.h>
#include <string.h>
@@ -1597,7 +1598,7 @@ void iowait(ior)
*/
#define IOTRAP_REQSIZE 2048
-struct kmem_cache io_trap_cache;
+struct kmem_cache io_trap_cache __cacheline_aligned;
/*
* Initialization. Called from mach_device_init().
diff --git a/device/net_io.c b/device/net_io.c
index 68dcc09..97b5be6 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -38,6 +38,7 @@
* It may change a lot real soon. -cmaeda 11 June 1993
*/
+#include <cache.h>
#include <sys/types.h>
#include <string.h>
@@ -303,7 +304,7 @@ struct net_rcv_port {
};
typedef struct net_rcv_port *net_rcv_port_t;
-struct kmem_cache net_rcv_cache; /* cache of net_rcv_port structs */
+struct kmem_cache net_rcv_cache __cacheline_aligned; /* cache of net_rcv_port structs */
#define NET_HASH_SIZE 256
#define N_NET_HASH 4
@@ -322,7 +323,7 @@ struct net_hash_entry {
};
typedef struct net_hash_entry *net_hash_entry_t;
-struct kmem_cache net_hash_entry_cache;
+struct kmem_cache net_hash_entry_cache __cacheline_aligned;
/*
* This structure represents a packet filter with multiple sessions.
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index fd5f4b6..caab5e1 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -36,6 +36,7 @@
* Support for 80387 floating point or FP emulator.
*/
+#include <cache.h>
#include <string.h>
#include <mach/exception.h>
@@ -70,7 +71,7 @@
#endif
int fp_kind = FP_387; /* 80387 present */
-struct kmem_cache ifps_cache; /* cache for FPU save area */
+struct kmem_cache ifps_cache __cacheline_aligned; /* cache for FPU save area */
static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
#if NCPUS == 1
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
index 490b102..6c0f48b 100644
--- a/i386/i386/machine_task.c
+++ b/i386/i386/machine_task.c
@@ -20,6 +20,7 @@
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+#include <cache.h>
#include <kern/lock.h>
#include <mach/mach_types.h>
#include <kern/slab.h>
@@ -29,7 +30,7 @@
/* The cache which holds our IO permission bitmaps. */
-struct kmem_cache machine_task_iopb_cache;
+struct kmem_cache machine_task_iopb_cache __cacheline_aligned;
/* Initialize the machine task module. The function is called once at
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index a0578d1..1731bd8 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -24,6 +24,7 @@
* the rights to redistribute these changes.
*/
+#include <cache.h>
#include <stddef.h>
#include <string.h>
@@ -60,7 +61,7 @@
#include <i386/mp_desc.h>
#endif
-struct kmem_cache pcb_cache;
+struct kmem_cache pcb_cache __cacheline_aligned;
vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 2943d26..da773e3 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -55,6 +55,7 @@
* and to when physical maps must be made correct.
*/
+#include <cache.h>
#include <string.h>
#include <mach/machine/vm_types.h>
@@ -133,7 +134,7 @@ decl_simple_lock_data(, pv_free_list_lock)
simple_unlock(&pv_free_list_lock); \
}
-struct kmem_cache pv_list_cache; /* cache of pv_entry structures */
+struct kmem_cache pv_list_cache __cacheline_aligned; /* cache of pv_entry structures */
/*
* Each entry in the pv_head_table is locked by a bit in the
@@ -400,7 +401,7 @@ struct pmap_update_list cpu_update_list[NCPUS];
struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
-struct kmem_cache pmap_cache; /* cache of pmap structures */
+struct kmem_cache pmap_cache __cacheline_aligned; /* cache of pmap structures */
boolean_t pmap_debug = FALSE; /* flag for debugging prints */
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
index f1d763c..504d0ce 100644
--- a/ipc/ipc_entry.c
+++ b/ipc/ipc_entry.c
@@ -34,6 +34,7 @@
* Primitive functions to manipulate translation entries.
*/
+#include <cache.h>
#include <kern/printf.h>
#include <string.h>
@@ -51,7 +52,7 @@
#include <ipc/ipc_table.h>
#include <ipc/ipc_object.h>
-struct kmem_cache ipc_tree_entry_cache;
+struct kmem_cache ipc_tree_entry_cache __cacheline_aligned;
/*
* Routine: ipc_entry_tree_collision
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
index 06c53eb..3b71a68 100644
--- a/ipc/ipc_marequest.c
+++ b/ipc/ipc_marequest.c
@@ -34,6 +34,7 @@
* Functions to handle msg-accepted requests.
*/
+#include <cache.h>
#include <mach/message.h>
#include <mach/port.h>
#include <kern/lock.h>
@@ -57,7 +58,7 @@
#endif
-struct kmem_cache ipc_marequest_cache;
+struct kmem_cache ipc_marequest_cache __cacheline_aligned;
#define imar_alloc() ((ipc_marequest_t) kmem_cache_alloc(&ipc_marequest_cache))
#define imar_free(imar) kmem_cache_free(&ipc_marequest_cache, (vm_offset_t) (imar))
diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
index 982bd4e..3538e14 100644
--- a/ipc/ipc_object.c
+++ b/ipc/ipc_object.c
@@ -31,6 +31,7 @@
* Functions to manipulate IPC objects.
*/
+#include <cache.h>
#include <string.h>
#include <mach/boolean.h>
@@ -54,7 +55,7 @@
#endif /* MACH_KDB */
-struct kmem_cache ipc_object_caches[IOT_NUMBER];
+struct kmem_cache ipc_object_caches[IOT_NUMBER] __cacheline_aligned;
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
index ab55e83..c87a8cb 100644
--- a/ipc/ipc_space.c
+++ b/ipc/ipc_space.c
@@ -36,6 +36,7 @@
* Functions to manipulate IPC capability spaces.
*/
+#include <cache.h>
#include <string.h>
#include <mach/boolean.h>
@@ -55,7 +56,7 @@
-struct kmem_cache ipc_space_cache;
+struct kmem_cache ipc_space_cache __cacheline_aligned;
ipc_space_t ipc_space_kernel;
ipc_space_t ipc_space_reply;
diff --git a/kern/act.c b/kern/act.c
index b8ad602..c262db6 100644
--- a/kern/act.c
+++ b/kern/act.c
@@ -26,6 +26,7 @@
#ifdef MIGRATING_THREADS
+#include <cache.h>
#include <string.h>
#include <mach/kern_return.h>
@@ -46,7 +47,7 @@ static void special_handler(ReturnHandler *rh, struct Act
*act);
#endif
#ifndef ACT_STATIC_KLUDGE
-static struct kmem_cache act_cache;
+static struct kmem_cache act_cache __cacheline_aligned;
#else
static Act *act_freelist;
static Act free_acts[ACT_STATIC_KLUDGE];
diff --git a/kern/processor.c b/kern/processor.c
index 865c324..94b143a 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -27,6 +27,7 @@
* processor.c: processor and processor_set manipulation routines.
*/
+#include <cache.h>
#include <string.h>
#include <mach/boolean.h>
@@ -48,7 +49,7 @@
#if MACH_HOST
#include <kern/slab.h>
-struct kmem_cache pset_cache;
+struct kmem_cache pset_cache __cacheline_aligned;
#endif /* MACH_HOST */
diff --git a/kern/slab.c b/kern/slab.c
index d1e3632..dd253f3 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -76,6 +76,7 @@
* handled likewise.
*/
+#include <cache.h>
#include <string.h>
#include <kern/assert.h>
#include <kern/mach_clock.h>
@@ -257,18 +258,18 @@ static struct kmem_cpu_pool_type kmem_cpu_pool_types[] = {
/*
* Caches where CPU pool arrays are allocated from.
*/
-static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)];
+static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)] __cacheline_aligned;
#endif /* SLAB_USE_CPU_POOLS */
/*
* Cache for off slab data.
*/
-static struct kmem_cache kmem_slab_cache;
+static struct kmem_cache kmem_slab_cache __cacheline_aligned;
/*
* General purpose caches array.
*/
-static struct kmem_cache kalloc_caches[KALLOC_NR_CACHES];
+static struct kmem_cache kalloc_caches[KALLOC_NR_CACHES] __cacheline_aligned;
/*
* List of all caches managed by the allocator.
diff --git a/kern/task.c b/kern/task.c
index 13b3c76..f18c19d 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -31,6 +31,7 @@
* Task management primitives implementation.
*/
+#include <cache.h>
#include <string.h>
#include <mach/machine/vm_types.h>
@@ -52,7 +53,7 @@
#include <machine/machspl.h> /* for splsched */
task_t kernel_task = TASK_NULL;
-struct kmem_cache task_cache;
+struct kmem_cache task_cache __cacheline_aligned;
void task_init(void)
{
diff --git a/kern/thread.c b/kern/thread.c
index 8474950..93f80cc 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -31,6 +31,7 @@
* Thread management primitives implementation.
*/
+#include <cache.h>
#include <kern/printf.h>
#include <mach/std_types.h>
#include <mach/policy.h>
@@ -69,7 +70,7 @@
thread_t active_threads[NCPUS];
vm_offset_t active_stacks[NCPUS];
-struct kmem_cache thread_cache;
+struct kmem_cache thread_cache __cacheline_aligned;
queue_head_t reaper_queue;
decl_simple_lock_data(, reaper_lock)
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index a64bfcc..6deb0e8 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -36,6 +36,7 @@
mask, and the memory object is validated at the time the mapping is
established. */
+#include <cache.h>
#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/notify.h>
@@ -48,7 +49,7 @@
#include <vm/memory_object_proxy.h>
/* The cache which holds our proxy memory objects. */
-static struct kmem_cache memory_object_proxy_cache;
+static struct kmem_cache memory_object_proxy_cache __cacheline_aligned;
struct memory_object_proxy
{
diff --git a/vm/vm_external.c b/vm/vm_external.c
index 77bd44b..554ba18 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -30,6 +30,7 @@
* information takes the form of hints.
*/
+#include <cache.h>
#include <mach/boolean.h>
#include <kern/slab.h>
#include <vm/vm_external.h>
@@ -40,7 +41,7 @@
boolean_t vm_external_unsafe = FALSE;
-struct kmem_cache vm_external_cache;
+struct kmem_cache vm_external_cache __cacheline_aligned;
/*
* The implementation uses bit arrays to record whether
@@ -52,8 +53,8 @@ struct kmem_cache vm_external_cache;
#define SMALL_SIZE (VM_EXTERNAL_SMALL_SIZE/8)
#define LARGE_SIZE (VM_EXTERNAL_LARGE_SIZE/8)
-struct kmem_cache vm_object_small_existence_map_cache;
-struct kmem_cache vm_object_large_existence_map_cache;
+struct kmem_cache vm_object_small_existence_map_cache __cacheline_aligned;
+struct kmem_cache vm_object_large_existence_map_cache __cacheline_aligned;
vm_external_t vm_external_create(size)
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index d9c3d7b..d0f2866 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -33,6 +33,7 @@
* Page fault handling module.
*/
+#include <cache.h>
#include <kern/printf.h>
#include <vm/vm_fault.h>
#include <mach/kern_return.h>
@@ -84,7 +85,7 @@ typedef struct vm_fault_state {
vm_prot_t vmfp_access;
} vm_fault_state_t;
-struct kmem_cache vm_fault_state_cache;
+struct kmem_cache vm_fault_state_cache __cacheline_aligned;
int vm_object_absent_max = 50;
diff --git a/vm/vm_map.c b/vm/vm_map.c
index e6eabdb..d9a110c 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -34,6 +34,7 @@
* Virtual memory mapping module.
*/
+#include <cache.h>
#include <kern/printf.h>
#include <mach/kern_return.h>
#include <mach/port.h>
@@ -124,10 +125,10 @@ MACRO_END
* vm_object_copy_strategically() in vm_object.c.
*/
-struct kmem_cache vm_map_cache; /* cache for vm_map structures */
-struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
-struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
-struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
+struct kmem_cache vm_map_cache __cacheline_aligned; /* cache for vm_map structures */
+struct kmem_cache vm_map_entry_cache __cacheline_aligned; /* cache for vm_map_entry structures */
+struct kmem_cache vm_map_kentry_cache __cacheline_aligned; /* cache for kernel entry structures */
+struct kmem_cache vm_map_copy_cache __cacheline_aligned; /* cache for vm_map_copy structures */
/*
* Placeholder object for submap operations. This object is dropped
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 582487e..8ef68c7 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -33,6 +33,7 @@
* Virtual memory object module.
*/
+#include <cache.h>
#include <kern/printf.h>
#include <string.h>
@@ -137,7 +138,7 @@
* ZZZ Continue this comment.
*/
-struct kmem_cache vm_object_cache; /* vm backing store cache */
+struct kmem_cache vm_object_cache __cacheline_aligned; /* vm backing store cache */
/*
* All wired-down kernel memory belongs to a single virtual
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index fa02cbc..9c66110 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -33,6 +33,7 @@
* Resident memory management module.
*/
+#include <cache.h>
#include <kern/printf.h>
#include <string.h>
@@ -128,7 +129,7 @@ unsigned int vm_page_free_count_minimum; /* debugging */
* These page structures are allocated the way
* most other kernel structures are.
*/
-struct kmem_cache vm_page_cache;
+struct kmem_cache vm_page_cache __cacheline_aligned;
/*
* Fictitious pages don't have a physical address,
--
1.8.5.2