I changed the code to chain all the allocated ports together in a linked list. Then, I added new fields to ipc_port to track send-only rights and to understand where those rights were being created and where the port was being manipulated. Finally, I added a new KDB command to show all the inactive ports. It was then easy to track exactly how the port was handled inside exception_raise_continue_slow.
There might be situations where it loops around, depending on the scheduling order (see line 798 in sched_prim.c).
I've added a continuation argument to thread_halt_self so that the port is released if the thread resumes via the continuation (which is followed by thread_exception_return). Otherwise, it may loop, and it will eventually release the reference and call thread_exception_return as well.
I've also made some cosmetic changes to the code.
diff --git a/kern/ast.c b/kern/ast.c
index 4b9d63d..2772ed3 100644
--- a/kern/ast.c
+++ b/kern/ast.c
@@ -96,7 +96,7 @@ ast_taken(void)
if (self != current_processor()->idle_thread) {
#ifndef MIGRATING_THREADS
while (thread_should_halt(self))
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
#endif
/*
diff --git a/kern/exception.c b/kern/exception.c
index 6cb3bfb..9ad1c42 100644
--- a/kern/exception.c
+++ b/kern/exception.c
@@ -231,7 +231,7 @@ exception_no_server(void)
*/
while (thread_should_halt(self))
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
#if 0
@@ -257,7 +257,7 @@ exception_no_server(void)
*/
(void) task_terminate(self->task);
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
panic("terminating the task didn't kill us");
/*NOTREACHED*/
}
@@ -848,6 +848,26 @@ exception_raise_continue(void)
}
/*
+ * Routine: thread_release_and_exception_return
+ * Purpose:
+ * Continue after thread was halted.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack and
+ * control goes back to thread_exception_return.
+ * Returns:
+ * Doesn't return.
+ */
+static void
+thread_release_and_exception_return(void)
+{
+ ipc_thread_t self = current_thread();
+ /* reply port must be released */
+ ipc_port_release(self->ith_port);
+ thread_exception_return();
+ /*NOTREACHED*/
+}
+
+/*
* Routine: exception_raise_continue_slow
* Purpose:
* Continue after finishing an ipc_mqueue_receive
@@ -876,10 +896,14 @@ exception_raise_continue_slow(
*/
while (thread_should_halt(self)) {
- /* don't terminate while holding a reference */
+ /* if thread is about to terminate, release the port */
if (self->ast & AST_TERMINATE)
ipc_port_release(reply_port);
- thread_halt_self();
+ /*
+ * Use the continuation to release the port in
+ * case the thread is about to halt.
+ */
+ thread_halt_self(thread_release_and_exception_return);
}
ip_lock(reply_port);
diff --git a/kern/machine.c b/kern/machine.c
index eced768..3f7a7f7 100644
--- a/kern/machine.c
+++ b/kern/machine.c
@@ -270,7 +270,7 @@ Retry:
assert_wait((event_t) processor, TRUE);
processor_unlock(processor);
splx(s);
- thread_block((void(*)()) 0);
+ thread_block(thread_no_continuation);
goto Retry;
}
@@ -299,7 +299,7 @@ Retry:
assert_wait((event_t)processor, TRUE);
processor_unlock(processor);
splx(s);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
s = splsched();
processor_lock(processor);
}
@@ -415,7 +415,7 @@ void processor_doaction(processor_t processor)
*/
this_thread = current_thread();
thread_bind(this_thread, processor);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
pset = processor->processor_set;
#if MACH_HOST
@@ -572,7 +572,7 @@ Restart_pset:
thread_deallocate(prev_thread);
thread_bind(this_thread, PROCESSOR_NULL);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
return;
}
diff --git a/kern/profile.c b/kern/profile.c
index 5510721..1381b1a 100644
--- a/kern/profile.c
+++ b/kern/profile.c
@@ -172,7 +172,7 @@ printf("profile_thread: mach_msg failed returned %x\n",(int)mr);
sizeof(struct buf_to_send));
}
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
}
@@ -213,7 +213,7 @@ thread_t th;
thread_wakeup((event_t) profile_thread);
assert_wait((event_t) &buf_entry->wakeme, TRUE);
splx(s);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
} else {
splx(s);
kmem_free(kernel_map, vm_buf_entry, sizeof(struct buf_to_send));
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index e8f260e..580ca43 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -454,7 +454,7 @@ void thread_sleep(
{
assert_wait(event, interruptible); /* assert event */
simple_unlock(lock); /* release the lock */
- thread_block((void (*)()) 0); /* block ourselves */
+ thread_block(thread_no_continuation); /* block ourselves */
}
/*
@@ -617,7 +617,7 @@ boolean_t thread_invoke(
thread_unlock(new_thread);
thread_wakeup(TH_EV_STATE(new_thread));
- if (continuation != (void (*)()) 0) {
+ if (continuation != thread_no_continuation) {
(void) spl0();
call_continuation(continuation);
/*NOTREACHED*/
@@ -630,7 +630,7 @@ boolean_t thread_invoke(
*/
thread_lock(new_thread);
if ((old_thread->stack_privilege != current_stack()) &&
- (continuation != (void (*)()) 0))
+ (continuation != thread_no_continuation))
{
switch (new_thread->state & TH_SWAP_STATE) {
case TH_SWAPPED:
@@ -915,7 +915,7 @@ void thread_dispatch(
thread_lock(thread);
- if (thread->swap_func != (void (*)()) 0) {
+ if (thread->swap_func != thread_no_continuation) {
assert((thread->state & TH_SWAP_STATE) == 0);
thread->state |= TH_SWAPPED;
stack_free(thread);
diff --git a/kern/sched_prim.h b/kern/sched_prim.h
index 62698dc..bb1865c 100644
--- a/kern/sched_prim.h
+++ b/kern/sched_prim.h
@@ -52,6 +52,8 @@ typedef void *event_t; /* wait event */
typedef void (*continuation_t)(void); /* continuation */
+#define thread_no_continuation ((continuation_t) 0) /* no continuation */
+
/*
* Exported interface to sched_prim.c.
*/
diff --git a/kern/task.c b/kern/task.c
index 9a3d848..e9e6ba2 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -377,7 +377,7 @@ kern_return_t task_terminate(
task_unlock(task);
thread_force_terminate(thread);
thread_deallocate(thread);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
task_lock(task);
}
task_unlock(task);
@@ -893,7 +893,7 @@ task_assign(
task->assign_active = TRUE;
assert_wait((event_t)&task->assign_active, TRUE);
task_unlock(task);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
task_lock(task);
}
diff --git a/kern/thread.c b/kern/thread.c
index 865a1cc..3e90079 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1132,7 +1132,7 @@ void __attribute__((noreturn)) walking_zombie(void)
* Thread calls this routine on exit from the kernel when it
* notices a halt request.
*/
-void thread_halt_self(void)
+void thread_halt_self(continuation_t continuation)
{
thread_t thread = current_thread();
spl_t s;
@@ -1173,7 +1173,7 @@ void thread_halt_self(void)
thread_unlock(thread);
splx(s);
counter(c_thread_halt_self_block++);
- thread_block(thread_exception_return);
+ thread_block(continuation);
/*
* thread_release resets TH_HALTED.
*/
@@ -1348,7 +1348,7 @@ kern_return_t thread_suspend(
while (thread->state & TH_UNINT) {
assert_wait(TH_EV_STATE(thread), TRUE);
thread_unlock(thread);
- thread_block(NULL);
+ thread_block(thread_no_continuation);
thread_lock(thread);
}
if (thread->user_stop_count++ == 0) {
diff --git a/kern/thread.h b/kern/thread.h
index 0e85d8c..7106fd2 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -100,7 +100,7 @@ struct thread {
vm_offset_t stack_privilege;/* reserved kernel stack */
/* Swapping information */
- void (*swap_func)(); /* start here after swapin */
+ continuation_t swap_func; /* start here after swapin */
/* Blocking information */
event_t wait_event; /* event we are waiting on */
@@ -362,7 +362,7 @@ extern void thread_release(thread_t);
extern kern_return_t thread_halt(
thread_t thread,
boolean_t must_halt);
-extern void thread_halt_self(void);
+extern void thread_halt_self(continuation_t);
extern void thread_force_terminate(thread_t);
extern thread_t kernel_thread(
task_t task,