diff --git a/Makefrag.am b/Makefrag.am
index e001d65d..3eeec0dc 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -132,6 +132,7 @@ libkernel_a_SOURCES += \
 	kern/assert.h \
 	kern/ast.c \
 	kern/ast.h \
+	kern/atomic.h \
 	kern/boot_script.h \
 	kern/bootstrap.c \
 	kern/bootstrap.h \
@@ -160,6 +161,8 @@ libkernel_a_SOURCES += \
 	kern/ipc_tt.h \
 	kern/kalloc.h \
 	kern/kern_types.h \
+	kern/kmutex.c \
+	kern/kmutex.h \
 	kern/list.h \
 	kern/lock.c \
 	kern/lock.h \
diff --git a/kern/atomic.h b/kern/atomic.h
new file mode 100644
index 00000000..d80dbb34
--- /dev/null
+++ b/kern/atomic.h
@@ -0,0 +1,35 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+   Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either
+   version 2 of the license, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public
+   License along with this program; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _KERN_ATOMIC_H_
+#define _KERN_ATOMIC_H_   1
+
+#define atomic_cas_bool(ptr, exp, nval)   \
+  ({   \
+     typeof (exp) __e = (exp);   \
+     __atomic_compare_exchange_n ((ptr), &__e, (nval), 0,   \
+       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);   \
+   })
+
+#define atomic_swap(ptr, val)   \
+  __atomic_exchange_n ((ptr), (val), __ATOMIC_SEQ_CST)
+
+#define atomic_store(ptr, val)   \
+  __atomic_store_n ((ptr), (val), __ATOMIC_SEQ_CST)
+
+#endif
diff --git a/kern/gsync.c b/kern/gsync.c
index e70e1199..b32a73dd 100644
--- a/kern/gsync.c
+++ b/kern/gsync.c
@@ -17,36 +17,59 @@
 */
 
 #include <kern/gsync.h>
+#include <kern/kmutex.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
-#include <kern/lock.h>
 #include <kern/list.h>
 #include <vm/vm_map.h>
+#include <vm/vm_object.h>
 
 /* An entry in the global hash table. */
 struct gsync_hbucket
 {
   struct list entries;
-  decl_simple_lock_data (, lock)
+  struct kmutex lock;
 };
 
 /* A key used to uniquely identify an address that a thread is
  * waiting on. Its members' values depend on whether said
  * address is shared or task-local. */
-struct gsync_key
+union gsync_key
 {
-  unsigned long u;
-  unsigned long v;
+  struct
+    {
+      vm_map_t map;
+      vm_offset_t addr;
+    } local;
+
+  struct
+    {
+      vm_offset_t off;
+      vm_object_t obj;
+    } shared;
+
+  struct
+    {
+      unsigned long u;
+      unsigned long v;
+    } any;
 };
 
 /* A thread that is blocked on an address with 'gsync_wait'. */
 struct gsync_waiter
 {
   struct list link;
-  struct gsync_key key;
+  union gsync_key key;
   thread_t waiter;
 };
 
+/* Needed data for temporary mappings. */
+struct vm_args
+{
+  vm_object_t obj;
+  vm_offset_t off;
+};
+
 #define GSYNC_NBUCKETS   512
 static struct gsync_hbucket gsync_buckets[GSYNC_NBUCKETS];
 
@@ -56,99 +79,85 @@ void gsync_setup (void)
   for (i = 0; i < GSYNC_NBUCKETS; ++i)
     {
       list_init (&gsync_buckets[i].entries);
-      simple_lock_init (&gsync_buckets[i].lock);
+      kmutex_init (&gsync_buckets[i].lock);
     }
 }
 
 /* Convenience comparison functions for gsync_key's. */
 static inline int
-gsync_key_eq (const struct gsync_key *lp,
-  const struct gsync_key *rp)
+gsync_key_eq (const union gsync_key *lp,
+  const union gsync_key *rp)
 {
-  return (lp->u == rp->u && lp->v == rp->v);
+  return (lp->any.u == rp->any.u && lp->any.v == rp->any.v);
 }
 
 static inline int
-gsync_key_lt (const struct gsync_key *lp,
-  const struct gsync_key *rp)
+gsync_key_lt (const union gsync_key *lp,
+  const union gsync_key *rp)
 {
-  return (lp->u < rp->u || (lp->u == rp->u && lp->v < rp->v));
+  return (lp->any.u < rp->any.u ||
+    (lp->any.u == rp->any.u && lp->any.v < rp->any.v));
 }
 
 #define MIX2_LL(x, y)   ((((x) << 5) | ((x) >> 27)) ^ (y))
 
 static inline unsigned int
-gsync_key_hash (const struct gsync_key *keyp)
+gsync_key_hash (const union gsync_key *keyp)
 {
   unsigned int ret = sizeof (void *);
 #ifndef __LP64__
-  ret = MIX2_LL (ret, keyp->u);
-  ret = MIX2_LL (ret, keyp->v);
+  ret = MIX2_LL (ret, keyp->any.u);
+  ret = MIX2_LL (ret, keyp->any.v);
#else
-  ret = MIX2_LL (ret, keyp->u & ~0U);
-  ret = MIX2_LL (ret, keyp->u >> 32);
-  ret = MIX2_LL (ret, keyp->v & ~0U);
-  ret = MIX2_LL (ret, keyp->v >> 32);
+  ret = MIX2_LL (ret, keyp->any.u & ~0U);
+  ret = MIX2_LL (ret, keyp->any.u >> 32);
+  ret = MIX2_LL (ret, keyp->any.v & ~0U);
+  ret = MIX2_LL (ret, keyp->any.v >> 32);
 #endif
   return (ret);
 }
 
-/* Test if the passed VM Map can access the address ADDR. The
- * parameter FLAGS is used to specify the width and protection
- * of the address. */
-static int
-valid_access_p (vm_map_t map, vm_offset_t addr, int flags)
-{
-  vm_prot_t prot = VM_PROT_READ |
-    ((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
-  vm_offset_t size = sizeof (unsigned int) *
-    ((flags & GSYNC_QUAD) ? 2 : 1);
-
-  vm_map_entry_t entry;
-  return (vm_map_lookup_entry (map, addr, &entry) &&
-    entry->vme_end >= addr + size &&
-    (prot & entry->protection) == prot);
-}
-
 /* Given a task and an address, initialize the key at *KEYP and
  * return the corresponding bucket in the global hash table. */
 static int
-gsync_fill_key (task_t task, vm_offset_t addr,
-  int flags, struct gsync_key *keyp)
+gsync_fill_key (task_t task, vm_offset_t addr, int flags,
+  union gsync_key *keyp, struct vm_args *vap, boolean_t add_ref)
 {
-  if (flags & GSYNC_SHARED)
+  vm_map_t map = task->map;
+  vm_prot_t prot = VM_PROT_READ |
+    ((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
+  vm_map_version_t ver;
+  vm_prot_t rprot;
+  boolean_t wired_p;
+
+  if (unlikely (vm_map_lookup (&map, addr, prot, &ver,
+      &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS ||
+      (prot & rprot) != prot))
+    return (-1);
+  else if (flags & GSYNC_SHARED)
     {
       /* For a shared address, we need the VM object
        * and offset as the keys. */
-      vm_map_t map = task->map;
-      vm_prot_t prot = VM_PROT_READ |
-        ((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
-      vm_map_version_t ver;
-      vm_prot_t rpr;
-      vm_object_t obj;
-      vm_offset_t off;
-      boolean_t wired_p;
-
-      if (unlikely (vm_map_lookup (&map, addr, prot, &ver,
-          &obj, &off, &rpr, &wired_p) != KERN_SUCCESS))
-        return (-1);
-
-      /* The VM object is returned locked. However, we check the
-       * address' accessibility later, so we can release it. */
-      vm_object_unlock (obj);
-
-      keyp->u = (unsigned long)obj;
-      keyp->v = (unsigned long)off;
+      keyp->shared.obj = vap->obj;
+      keyp->shared.off = vap->off;
     }
   else
    {
       /* Task-local address. The keys are the task's map and
        * the virtual address itself. */
-      keyp->u = (unsigned long)task->map;
-      keyp->v = (unsigned long)addr;
+      keyp->local.map = map;
+      keyp->local.addr = addr;
     }
 
+  if (add_ref)
+    /* The VM object is returned locked. However, we later acquire
+     * a sleeping lock for a bucket, so we must not hold any simple
+     * locks. To prevent this object from going away, we add a reference
+     * to it when requested. */
+    vm_object_reference_locked (vap->obj);
+
+  vm_object_unlock (vap->obj);
   return ((int)(gsync_key_hash (keyp) % GSYNC_NBUCKETS));
 }
 
@@ -160,7 +169,7 @@ node_to_waiter (struct list *nodep)
 
 static inline struct list*
 gsync_find_key (const struct list *entries,
-  const struct gsync_key *keyp, int *exactp)
+  const union gsync_key *keyp, int *exactp)
 {
   /* Look for a key that matches. We take advantage of the fact
    * that the entries are sorted to break out of the loop as
@@ -182,57 +191,94 @@ gsync_find_key (const struct list *entries,
   return (runp);
 }
 
-kern_return_t gsync_wait (task_t task, vm_offset_t addr,
-  unsigned int lo, unsigned int hi, natural_t msec, int flags)
+/* Create a temporary mapping in the kernel. */
+static inline vm_offset_t
+temp_mapping (struct vm_args *vap, vm_offset_t addr, vm_prot_t prot)
 {
-  if (unlikely (task != current_task()))
-    /* Not implemented yet. */
-    return (KERN_FAILURE);
+  vm_offset_t paddr;
+  vm_offset_t off = vap->off - (addr - trunc_page (addr));
 
-  struct gsync_waiter w;
-  int bucket = gsync_fill_key (task, addr, flags, &w.key);
+  if (vm_map_enter (kernel_map, &paddr, PAGE_SIZE,
+      0, TRUE, vap->obj, off, FALSE, prot, VM_PROT_ALL,
+      VM_INHERIT_DEFAULT) != KERN_SUCCESS)
+    paddr = 0;
 
-  if (unlikely (bucket < 0))
+  return (paddr);
+}
+
+kern_return_t gsync_wait (task_t task, vm_offset_t addr,
+  unsigned int lo, unsigned int hi, natural_t msec, int flags)
+{
+  if (unlikely (task == 0))
+    return (KERN_INVALID_TASK);
+  else if (unlikely (addr % sizeof (int) != 0))
     return (KERN_INVALID_ADDRESS);
 
-  /* Test that the address is actually valid for the
-   * given task. Do so with the read-lock held in order
-   * to prevent memory deallocations. */
   vm_map_lock_read (task->map);
-  struct gsync_hbucket *hbp = gsync_buckets + bucket;
-  simple_lock (&hbp->lock);
+  struct gsync_waiter w;
+  struct vm_args va;
+  boolean_t remote = task != current_task ();
+  int bucket = gsync_fill_key (task, addr, flags, &w.key, &va, remote);
 
-  if (unlikely (!valid_access_p (task->map, addr, flags)))
+  if (unlikely (bucket < 0))
     {
-      simple_unlock (&hbp->lock);
       vm_map_unlock_read (task->map);
       return (KERN_INVALID_ADDRESS);
     }
 
+  struct gsync_hbucket *hbp = gsync_buckets + bucket;
+  kmutex_lock (&hbp->lock, FALSE);
+
   /* Before doing any work, check that the expected value(s)
    * match the contents of the address. Otherwise, the waiting
    * thread could potentially miss a wakeup. */
-  if (((unsigned int *)addr)[0] != lo ||
-    ((flags & GSYNC_QUAD) &&
-      ((unsigned int *)addr)[1] != hi))
+
+  boolean_t equal;
+  if (likely (! remote))
+    equal = ((unsigned int *)addr)[0] == lo &&
+      ((flags & GSYNC_QUAD) == 0 ||
+       ((unsigned int *)addr)[1] == hi);
+  else
     {
-      simple_unlock (&hbp->lock);
-      vm_map_unlock_read (task->map);
-      return (KERN_INVALID_ARGUMENT);
+      vm_offset_t paddr = temp_mapping (&va, addr, VM_PROT_READ);
+      if (unlikely (paddr == 0))
+        {
+          kmutex_unlock (&hbp->lock);
+          vm_map_unlock_read (task->map);
+          vm_object_deallocate (va.obj);
+          return (KERN_MEMORY_FAILURE);
+        }
+
+      vm_offset_t off = addr & (PAGE_SIZE - 1);
+      paddr += off;
+
+      equal = ((unsigned int *)paddr)[0] == lo &&
+        ((flags & GSYNC_QUAD) == 0 ||
+         ((unsigned int *)paddr)[1] == hi);
+
+      paddr -= off;
+
+      /* Note that the call to 'vm_map_remove' will unreference
+       * the VM object, so we don't have to do it ourselves. */
+      vm_map_remove (kernel_map, paddr, paddr + PAGE_SIZE);
     }
 
+  /* Done with the task's map. */
   vm_map_unlock_read (task->map);
 
+  if (! equal)
+    {
+      kmutex_unlock (&hbp->lock);
+      return (KERN_INVALID_ARGUMENT);
+    }
+
   /* Look for the first entry in the hash bucket that
    * compares strictly greater than this waiter. */
   struct list *runp;
   list_for_each (&hbp->entries, runp)
-    {
-      struct gsync_waiter *p = node_to_waiter (runp);
-      if (gsync_key_lt (&w.key, &p->key))
-        break;
-    }
+    if (gsync_key_lt (&w.key, &node_to_waiter(runp)->key))
+      break;
 
   /* Finally, add ourselves to the list and go to sleep. */
   list_add (runp->prev, runp, &w.link);
@@ -243,24 +289,23 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
   else
     thread_will_wait (w.waiter);
 
-  thread_sleep (0, (simple_lock_t)&hbp->lock, TRUE);
+  kmutex_unlock (&hbp->lock);
+  thread_block (thread_no_continuation);
 
   /* We're back. */
-  kern_return_t ret = current_thread()->wait_result;
-  if (ret != THREAD_AWAKENED)
+  kern_return_t ret = KERN_SUCCESS;
+  if (current_thread()->wait_result != THREAD_AWAKENED)
     {
       /* We were interrupted or timed out. */
-      simple_lock (&hbp->lock);
-      if (w.link.next != 0)
+      kmutex_lock (&hbp->lock, FALSE);
+      if (!list_node_unlinked (&w.link))
         list_remove (&w.link);
-      simple_unlock (&hbp->lock);
+      kmutex_unlock (&hbp->lock);
 
       /* Map the error code. */
-      ret = ret == THREAD_INTERRUPTED ?
+      ret = current_thread()->wait_result == THREAD_INTERRUPTED ?
         KERN_INTERRUPTED : KERN_TIMEDOUT;
     }
-  else
-    ret = KERN_SUCCESS;
 
   return (ret);
 }
@@ -281,34 +326,55 @@ dequeue_waiter (struct list *nodep)
 
 kern_return_t gsync_wake (task_t task,
   vm_offset_t addr, unsigned int val, int flags)
 {
-  if (unlikely (task != current_task()))
-    /* Not implemented yet. */
-    return (KERN_FAILURE);
-
-  struct gsync_key key;
-  int bucket = gsync_fill_key (task, addr, flags, &key);
-
-  if (unlikely (bucket < 0))
+  if (unlikely (task == 0))
+    return (KERN_INVALID_TASK);
+  else if (unlikely (addr % sizeof (int) != 0))
     return (KERN_INVALID_ADDRESS);
 
-  kern_return_t ret = KERN_INVALID_ARGUMENT;
-
   vm_map_lock_read (task->map);
-  struct gsync_hbucket *hbp = gsync_buckets + bucket;
-  simple_lock (&hbp->lock);
 
-  if (unlikely (!valid_access_p (task->map, addr, flags)))
+  union gsync_key key;
+  struct vm_args va;
+  int bucket = gsync_fill_key (task, addr, flags, &key, &va,
+    current_task () != task && (flags & GSYNC_MUTATE) != 0);
+
+  if (unlikely (bucket < 0))
     {
-      simple_unlock (&hbp->lock);
       vm_map_unlock_read (task->map);
       return (KERN_INVALID_ADDRESS);
     }
 
+  kern_return_t ret = KERN_INVALID_ARGUMENT;
+  struct gsync_hbucket *hbp = gsync_buckets + bucket;
+
+  kmutex_lock (&hbp->lock, FALSE);
+
   if (flags & GSYNC_MUTATE)
-    /* Set the contents of the address to the specified value,
-     * even if we don't end up waking any threads. Note that
-     * the buckets' simple locks give us atomicity. */
-    *(unsigned int *)addr = val;
+    {
+      /* Set the contents of the address to the specified value,
+       * even if we don't end up waking any threads. Note that
+       * the bucket's lock gives us atomicity. */
+
+      if (unlikely (task != current_task ()))
+        {
+          vm_offset_t paddr = temp_mapping (&va, addr,
+            VM_PROT_READ | VM_PROT_WRITE);
+
+          if (unlikely (paddr == 0))
+            {
+              kmutex_unlock (&hbp->lock);
+              vm_map_unlock_read (task->map);
+              vm_object_deallocate (va.obj);
+              return (KERN_MEMORY_FAILURE);
+            }
+
+          addr = paddr + (addr & (PAGE_SIZE - 1));
+        }
+
+      *(unsigned int *)addr = val;
+      if (unlikely (task != current_task ()))
+        vm_map_remove (kernel_map, addr, addr + sizeof (int));
+    }
 
   vm_map_unlock_read (task->map);
 
@@ -325,37 +391,28 @@ kern_return_t gsync_wake (task_t task,
       ret = KERN_SUCCESS;
     }
 
-  simple_unlock (&hbp->lock);
+  kmutex_unlock (&hbp->lock);
   return (ret);
 }
 
 kern_return_t gsync_requeue (task_t task, vm_offset_t src,
   vm_offset_t dst, boolean_t wake_one, int flags)
 {
-  if (unlikely (task != current_task()))
-    /* Not implemented yet. */
-    return (KERN_FAILURE);
-
-  struct gsync_key src_k, dst_k;
-  int src_bkt = gsync_fill_key (task, src, flags, &src_k);
-  int dst_bkt = gsync_fill_key (task, dst, flags, &dst_k);
-
-  if ((src_bkt | dst_bkt) < 0)
+  if (unlikely (task == 0))
+    return (KERN_INVALID_TASK);
+  else if (unlikely (src % sizeof (int) != 0 || dst % sizeof (int) != 0))
     return (KERN_INVALID_ADDRESS);
 
-  vm_map_lock_read (task->map);
+  union gsync_key src_k, dst_k;
+  struct vm_args va;
 
-  /* We don't actually dereference or modify the contents
-   * of the addresses, but we still check that they can
-   * be accessed by the task. */
-  if (unlikely (!valid_access_p (task->map, src, flags) ||
-      !valid_access_p (task->map, dst, flags)))
-    {
-      vm_map_unlock_read (task->map);
-      return (KERN_INVALID_ADDRESS);
-    }
+  int src_bkt = gsync_fill_key (task, src, flags, &src_k, &va, FALSE);
+  if (unlikely (src_bkt < 0))
+    return (KERN_INVALID_ADDRESS);
 
-  vm_map_unlock_read (task->map);
+  int dst_bkt = gsync_fill_key (task, dst, flags, &dst_k, &va, FALSE);
+  if (unlikely (dst_bkt < 0))
+    return (KERN_INVALID_ADDRESS);
 
   /* If we're asked to unconditionally wake up a waiter, then
    * we need to remove a maximum of two threads from the queue. */
@@ -365,16 +422,16 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
 
   /* Acquire the locks in order, to prevent any potential deadlock. */
   if (bp1 == bp2)
-    simple_lock (&bp1->lock);
+    kmutex_lock (&bp1->lock, FALSE);
   else if ((unsigned long)bp1 < (unsigned long)bp2)
     {
-      simple_lock (&bp1->lock);
-      simple_lock (&bp2->lock);
+      kmutex_lock (&bp1->lock, FALSE);
+      kmutex_lock (&bp2->lock, FALSE);
     }
   else
     {
-      simple_lock (&bp2->lock);
-      simple_lock (&bp1->lock);
+      kmutex_lock (&bp2->lock, FALSE);
+      kmutex_lock (&bp1->lock, FALSE);
     }
 
   kern_return_t ret = KERN_SUCCESS;
@@ -416,9 +473,9 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
     }
 
   /* Release the locks and we're done. */
-  simple_unlock (&bp1->lock);
+  kmutex_unlock (&bp1->lock);
   if (bp1 != bp2)
-    simple_unlock (&bp2->lock);
+    kmutex_unlock (&bp2->lock);
 
   return (ret);
 }
diff --git a/kern/kmutex.c b/kern/kmutex.c
new file mode 100644
index 00000000..3a3bd540
--- /dev/null
+++ b/kern/kmutex.c
@@ -0,0 +1,80 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+   Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either
+   version 2 of the license, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public
+   License along with this program; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+#include <kern/kmutex.h>
+#include <kern/atomic.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+
+void kmutex_init (struct kmutex *mtxp)
+{
+  mtxp->state = KMUTEX_AVAIL;
+  simple_lock_init (&mtxp->lock);
+}
+
+#define locked_swap(ptr, val)   \
+  ({   \
+     typeof (*ptr) __tmp = *(ptr);   \
+     *(ptr) = (val);   \
+     __tmp;   \
+   })
+
+int kmutex_lock (struct kmutex *mtxp, boolean_t interruptible)
+{
+  if (atomic_cas_bool (&mtxp->state, KMUTEX_AVAIL, KMUTEX_LOCKED))
+    /* Unowned mutex - We're done. */
+    return (0);
+
+  /* The mutex is locked. We may have to sleep. */
+  simple_lock (&mtxp->lock);
+  if (locked_swap (&mtxp->state, KMUTEX_CONTENDED) == KMUTEX_AVAIL)
+    {
+      /* The mutex was released in-between. */
+      simple_unlock (&mtxp->lock);
+      return (0);
+    }
+
+  /* Sleep, and check the wait result so we can tell our caller
+   * whether we were interrupted. Note that we need not reset the
+   * mutex state here; the owner will handle that in every case. */
+  thread_sleep ((event_t)mtxp, (simple_lock_t)&mtxp->lock, interruptible);
+  return (current_thread()->wait_result == THREAD_AWAKENED ? 0 : -1);
+}
+
+int kmutex_trylock (struct kmutex *mtxp)
+{
+  return (atomic_cas_bool (&mtxp->state,
+    KMUTEX_AVAIL, KMUTEX_LOCKED) ? 0 : -1);
+}
+
+void kmutex_unlock (struct kmutex *mtxp)
+{
+  if (atomic_cas_bool (&mtxp->state, KMUTEX_LOCKED, KMUTEX_AVAIL))
+    /* No waiters - We're done. */
+    return;
+
+  simple_lock (&mtxp->lock);
+
+  if (!thread_wakeup_one ((event_t)mtxp))
+    /* The waiter was interrupted and left - Reset the mutex state. */
+    mtxp->state = KMUTEX_AVAIL;
+
+  simple_unlock (&mtxp->lock);
+}
diff --git a/kern/kmutex.h b/kern/kmutex.h
new file mode 100644
index 00000000..b5f39886
--- /dev/null
+++ b/kern/kmutex.h
@@ -0,0 +1,43 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+   Contributed by Agustina Arzille <avarzille@riseup.net>, 2017.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either
+   version 2 of the license, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public
+   License along with this program; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _KERN_KMUTEX_H_
+#define _KERN_KMUTEX_H_   1
+
+#include <kern/lock.h>
+
+struct kmutex
+{
+  unsigned int state;
+  decl_simple_lock_data (, lock)
+};
+
+/* Possible values for the mutex state. */
+#define KMUTEX_AVAIL       0
+#define KMUTEX_LOCKED      1
+#define KMUTEX_CONTENDED   2
+
+extern void kmutex_init (struct kmutex *mtxp);
+
+extern int kmutex_lock (struct kmutex *mtxp, boolean_t interruptible);
+
+extern int kmutex_trylock (struct kmutex *mtxp);
+
+extern void kmutex_unlock (struct kmutex *mtxp);
+
+#endif
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index bb767352..173f0687 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -376,13 +376,14 @@ void clear_wait(
 *	and thread_wakeup_one.
 *
 */
-void thread_wakeup_prim(
+boolean_t thread_wakeup_prim(
 	event_t		event,
 	boolean_t	one_thread,
 	int		result)
 {
 	queue_t			q;
 	int			index;
+	boolean_t		woke = FALSE;
 	thread_t		thread, next_th;
 	decl_simple_lock_data( , *lock);
 	spl_t			s;
@@ -435,6 +436,7 @@ void thread_wakeup_prim(
 				break;
 			}
 			thread_unlock(thread);
+			woke = TRUE;
 			if (one_thread)
 				break;
 		}
@@ -442,6 +444,7 @@
 	}
 	simple_unlock(lock);
 	splx(s);
+	return (woke);
 }
 
 /*
diff --git a/kern/sched_prim.h b/kern/sched_prim.h
index dfb2f54b..405e5456 100644
--- a/kern/sched_prim.h
+++ b/kern/sched_prim.h
@@ -72,7 +72,7 @@ extern void thread_sleep(
 	simple_lock_t	lock,
 	boolean_t	interruptible);
 extern void	thread_wakeup(void);		/* for function pointers */
-extern void	thread_wakeup_prim(
+extern boolean_t thread_wakeup_prim(
 	event_t		event,
 	boolean_t	one_thread,
 	int		result);
diff --git a/vm/vm_object.h b/vm/vm_object.h
index f8f9bf8d..d42eeb4e 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -396,4 +396,10 @@ MACRO_END
 extern int vm_object_external_count;
 extern int vm_object_external_pages;
 
+/* Add a reference to a locked VM object. */
+#define vm_object_reference_locked(obj)   ++(obj)->ref_count
+
+/* Remove a reference from a locked VM object. */
+#define vm_object_unreference_locked(obj)   --(obj)->ref_count
+
 #endif	/* _VM_VM_OBJECT_H_ */
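
A note on usage (not part of the patch): the kmutex interface is the piece most likely to be reused by other kernel code, so below is a minimal sketch of how it is meant to be called, mirroring what gsync.c now does with its hash-bucket locks. The example_* names are hypothetical and exist only for illustration.

#include <kern/kmutex.h>

static struct kmutex example_lock;
static unsigned int example_counter;

void example_setup (void)
{
  kmutex_init (&example_lock);
}

/* Returns 0 on success, or -1 if the sleep was interrupted.
 * Interruption can only happen when INTERRUPTIBLE is TRUE;
 * gsync.c always passes FALSE and ignores the return value. */
int example_increment (boolean_t interruptible)
{
  if (kmutex_lock (&example_lock, interruptible) != 0)
    /* We were interrupted while sleeping - the mutex is NOT held here. */
    return (-1);

  /* Critical section. Unlike a simple lock, a kmutex is a sleeping
   * lock, so it may be held across operations that block - which is
   * what lets gsync.c call vm_map_enter while holding a bucket lock. */
  ++example_counter;

  kmutex_unlock (&example_lock);
  return (0);
}

The three-state protocol (KMUTEX_AVAIL/LOCKED/CONTENDED) mirrors a classic futex-style mutex: kmutex_unlock takes the slow path (the internal simple lock plus thread_wakeup_one) only when a sleeper may exist, and the new boolean_t result of thread_wakeup_prim lets it detect that a would-be waiter already gave up, so the state can safely be reset to KMUTEX_AVAIL.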