Relicense Sortix to the ISC license.
I hereby relicense all my work on Sortix under the ISC license as below.
All Sortix contributions by other people are already under this license,
are not substantial enough to be copyrightable, or have been removed.
All imported code from other projects is compatible with this license.
All GPL licensed code from other projects had previously been removed.
Copyright 2011-2016 Jonas 'Sortie' Termansen and contributors.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2016-03-02 17:38:16 -05:00
|
|
|
/*
|
2021-02-06 15:23:43 -05:00
|
|
|
* Copyright (c) 2011-2016, 2018, 2021 Jonas 'Sortie' Termansen.
|
Relicense Sortix to the ISC license.
I hereby relicense all my work on Sortix under the ISC license as below.
All Sortix contributions by other people are already under this license,
are not substantial enough to be copyrightable, or have been removed.
All imported code from other projects is compatible with this license.
All GPL licensed code from other projects had previously been removed.
Copyright 2011-2016 Jonas 'Sortie' Termansen and contributors.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
2016-03-02 17:38:16 -05:00
|
|
|
*
|
|
|
|
* Permission to use, copy, modify, and distribute this software for any
|
|
|
|
* purpose with or without fee is hereby granted, provided that the above
|
|
|
|
* copyright notice and this permission notice appear in all copies.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
|
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
|
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
|
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
|
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
|
|
*
|
|
|
|
* thread.cpp
|
|
|
|
* Describes a thread belonging to a process.
|
|
|
|
*/
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2013-08-04 14:24:59 -04:00
|
|
|
#include <sys/wait.h>
|
|
|
|
|
2013-07-27 06:56:40 -04:00
|
|
|
#include <assert.h>
|
|
|
|
#include <errno.h>
|
2021-02-06 15:23:43 -05:00
|
|
|
#include <limits.h>
|
2013-08-04 14:24:59 -04:00
|
|
|
#include <signal.h>
|
2014-03-03 18:11:13 -05:00
|
|
|
#include <stdlib.h>
|
2013-07-27 06:56:40 -04:00
|
|
|
#include <string.h>
|
2021-02-06 15:23:43 -05:00
|
|
|
#include <timespec.h>
|
2013-07-27 06:56:40 -04:00
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
#include <sortix/clock.h>
|
2013-08-31 13:35:17 -04:00
|
|
|
#include <sortix/exit.h>
|
2021-02-06 15:23:43 -05:00
|
|
|
#include <sortix/futex.h>
|
2013-07-27 06:56:40 -04:00
|
|
|
#include <sortix/mman.h>
|
|
|
|
#include <sortix/signal.h>
|
|
|
|
|
2013-08-31 13:35:17 -04:00
|
|
|
#include <sortix/kernel/copy.h>
|
2013-07-27 06:56:40 -04:00
|
|
|
#include <sortix/kernel/interrupt.h>
|
2021-02-06 15:23:43 -05:00
|
|
|
#include <sortix/kernel/ioctx.h>
|
2013-10-26 20:42:10 -04:00
|
|
|
#include <sortix/kernel/kernel.h>
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
#include <sortix/kernel/kthread.h>
|
|
|
|
#include <sortix/kernel/memorymanagement.h>
|
2013-05-12 18:41:30 -04:00
|
|
|
#include <sortix/kernel/process.h>
|
2013-07-27 06:56:40 -04:00
|
|
|
#include <sortix/kernel/scheduler.h>
|
|
|
|
#include <sortix/kernel/syscall.h>
|
2013-05-12 18:41:30 -04:00
|
|
|
#include <sortix/kernel/thread.h>
|
2013-07-27 06:56:40 -04:00
|
|
|
#include <sortix/kernel/time.h>
|
2013-01-08 18:41:35 -05:00
|
|
|
|
2016-03-26 11:04:05 -04:00
|
|
|
#if defined(__i386__) || defined(__x86_64__)
|
|
|
|
#include "x86-family/float.h"
|
|
|
|
#endif
|
|
|
|
|
2014-03-03 18:11:13 -05:00
|
|
|
// Placement new: construct an object at a caller-supplied address without
// allocating. The size is irrelevant since the caller owns the storage.
void* operator new (size_t /*size*/, void* where) throw()
{
	return where;
}
|
|
|
|
|
2013-07-27 06:56:40 -04:00
|
|
|
namespace Sortix {
|
2013-01-08 18:41:35 -05:00
|
|
|
|
2014-03-03 18:11:13 -05:00
|
|
|
// Allocate and construct a Thread object at a 16-byte-aligned address.
// malloc may return storage with weaker alignment than the 16 bytes the
// fpu environment inside the Thread requires, so we over-allocate by 16
// bytes and round the address up to the next 16-byte boundary. The raw
// allocation pointer is remembered in self_allocation so FreeThread can
// release it later. Returns NULL on allocation failure.
Thread* AllocateThread()
{
	uint8_t* allocation = (uint8_t*) malloc(sizeof(class Thread) + 16);
	if ( !allocation )
		return NULL;

	// Round up to the next multiple of 16 (a no-op if already aligned);
	// the extra 16 bytes above guarantee this stays inside the allocation.
	uintptr_t address = (uintptr_t) allocation;
	uint8_t* aligned = (uint8_t*) ((address + 15) & ~(uintptr_t) 0xF);

	assert(!((uintptr_t) aligned & 0xFUL));
	Thread* thread = new (aligned) Thread;
	assert(!((uintptr_t) thread->registers.fpuenv & 0xFUL));
	thread->self_allocation = allocation;
	return thread;
}
|
|
|
|
|
|
|
|
// Destroy a Thread created by AllocateThread. The object was built with
// placement new inside an over-aligned allocation, so run the destructor
// explicitly and then free the original (possibly unaligned) pointer that
// AllocateThread stashed in self_allocation.
void FreeThread(Thread* thread)
{
	uint8_t* raw_allocation = thread->self_allocation;
	thread->~Thread();
	free(raw_allocation);
}
|
|
|
|
|
2013-07-27 06:56:40 -04:00
|
|
|
// Initialize a freshly allocated Thread to a neutral, unscheduled state.
// The thread belongs to no process and is in state NONE until
// CreateKernelThread links it into a process and the scheduler.
Thread::Thread()
{
	// The fpu environment must be 16-byte aligned; AllocateThread arranges
	// this, so verify the invariant held.
	assert(!((uintptr_t) registers.fpuenv & 0xFUL));
	// Identity.
	name = "";
	// The system-wide thread id is simply the Thread's address — unique for
	// the lifetime of the object.
	system_tid = (uintptr_t) this;
	yield_to_tid = 0;
	id = 0; // TODO: Make a thread id.
	// Process membership and sibling links (set up by CreateKernelThread).
	process = NULL;
	prevsibling = NULL;
	nextsibling = NULL;
	// Scheduler run-queue links.
	scheduler_list_prev = NULL;
	scheduler_list_next = NULL;
	state = NONE;
	// CPU register snapshot and kernel stack bookkeeping.
	memset(&registers, 0, sizeof(registers));
	kernelstackpos = 0;
	kernelstacksize = 0;
	// Signal delivery state.
	signal_count = 0;
	signal_single_frame = 0;
	signal_canary = 0;
	kernelstackmalloced = false;
	pledged_destruction = false;
	force_no_signals = false;
	signal_single = false;
	has_saved_signal_mask = false;
	sigemptyset(&signal_pending);
	sigemptyset(&signal_mask);
	sigemptyset(&saved_signal_mask);
	// No alternate signal stack until sigaltstack(2) installs one.
	memset(&signal_stack, 0, sizeof(signal_stack));
	signal_stack.ss_flags = SS_DISABLE;
	// execute_clock initialized in member constructor.
	// system_clock initialized in member constructor.
	Time::InitializeThreadClocks(this);
	// Futex wait-queue state: not waiting on any futex.
	futex_address = 0;
	futex_woken = false;
	futex_prev_waiting = NULL;
	futex_next_waiting = NULL;
	yield_operation = YIELD_OPERATION_NONE;
}
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2013-07-27 06:56:40 -04:00
|
|
|
// Tear down a thread. Notifies the owning process first (so it can unlink
// the thread and update its bookkeeping), then releases the kernel stack if
// this Thread owns it. Must never run on the thread being destroyed — its
// kernel stack would still be in use.
Thread::~Thread()
{
	if ( process )
		process->OnThreadDestruction(this);
	// Self-destruction is forbidden; the current thread's stack is live.
	assert(CurrentThread() != this);
	// Only free the stack when this object allocated it (kernelstackmalloced);
	// otherwise the storage is owned elsewhere.
	if ( kernelstackmalloced )
		delete[] (uint8_t*) kernelstackpos;
}
|
2011-11-06 16:00:29 -05:00
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
// Create a thread in the given process with an explicit initial register
// state and link it into the process's thread list. Returns the new Thread,
// or NULL with errno set on failure.
Thread* CreateKernelThread(Process* process,
                           struct thread_registers* regs,
                           const char* name)
{
	assert(process && regs && process->addrspace);

#if defined(__x86_64__)
	// fsbase/gsbase must be canonical x86_64 addresses (sign-extended from
	// bit 47) or loading them would fault.
	if ( regs->fsbase >> 48 != 0x0000 && regs->fsbase >> 48 != 0xFFFF )
		return errno = EINVAL, (Thread*) NULL;
	if ( regs->gsbase >> 48 != 0x0000 && regs->gsbase >> 48 != 0xFFFF )
		return errno = EINVAL, (Thread*) NULL;
#endif

	kthread_mutex_lock(&process->threadlock);

	// Note: Only allow the process itself to make threads, except the initial
	// thread. This requirement is because kthread_exit() needs to know when
	// it's the last thread in the process (using threads_not_exiting_count),
	// and that no more threads will appear, so it can run some final process
	// termination steps without any interference. It's always allowed to create
	// threads in the kernel process as it never exits.
	assert(!process->firstthread ||
	       process == CurrentProcess() ||
	       process == Scheduler::GetKernelProcess());

	Thread* thread = AllocateThread();
	if ( !thread )
	{
		// Fix: previously this path returned while still holding threadlock,
		// deadlocking any later thread creation or exit in this process.
		kthread_mutex_unlock(&process->threadlock);
		return NULL;
	}
	thread->name = name;

	memcpy(&thread->registers, regs, sizeof(struct thread_registers));

	// Create the family tree: push the new thread at the head of the
	// process's sibling list.
	thread->process = process;
	Thread* firsty = process->firstthread;
	if ( firsty )
		firsty->prevsibling = thread;
	thread->nextsibling = firsty;
	process->firstthread = thread;
	process->threads_not_exiting_count++;

	kthread_mutex_unlock(&process->threadlock);

	return thread;
}
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-03-03 18:11:13 -05:00
|
|
|
// Prepare the initial register state for a new kernel thread so that when
// the scheduler first switches to it, it begins executing entry(user) on the
// given stack, and falls through into kthread_exit() when entry returns.
// The stack layout is crafted per architecture to keep the ABI-required
// 16-byte stack alignment at the point entry starts executing.
static void SetupKernelThreadRegs(struct thread_registers* regs,
                                  Process* process,
                                  void (*entry)(void*),
                                  void* user,
                                  uintptr_t stack,
                                  size_t stack_size)
{
	memset(regs, 0, sizeof(*regs));

	// Trim the stack region to a 16-byte aligned base and size; the caller
	// must supply enough stack that alignment never exhausts it.
	size_t stack_alignment = 16;
	while ( stack & (stack_alignment-1) )
	{
		assert(stack_size);
		stack++;
		stack_size--;
	}

	stack_size &= ~(stack_alignment-1);

#if defined(__i386__)
	// stack_values points one past the top of the stack; entries are filled
	// at negative indices. i386 passes the thread parameter on the stack.
	uintptr_t* stack_values = (uintptr_t*) (stack + stack_size);

	assert(5 * sizeof(uintptr_t) <= stack_size);

	/* -- 16-byte aligned -- */
	/* -1 padding */
	stack_values[-2] = (uintptr_t) 0; /* null eip */
	stack_values[-3] = (uintptr_t) 0; /* null ebp */
	stack_values[-4] = (uintptr_t) user; /* thread parameter */
	/* -- 16-byte aligned -- */
	stack_values[-5] = (uintptr_t) kthread_exit; /* return to kthread_exit */
	/* upcoming ebp */
	/* -7 padding */
	/* -8 padding */
	/* -- 16-byte aligned -- */

	regs->eip = (uintptr_t) entry;
	// esp points at the kthread_exit return address, as if entry was just
	// called; ebp at the null frame so backtraces terminate cleanly.
	regs->esp = (uintptr_t) (stack_values - 5);
	regs->eax = 0;
	regs->ebx = 0;
	regs->ecx = 0;
	regs->edx = 0;
	regs->edi = 0;
	regs->esi = 0;
	regs->ebp = (uintptr_t) (stack_values - 3);
	// Kernel code/data segments at kernel privilege.
	regs->cs = KCS | KRPL;
	regs->ds = KDS | KRPL;
	regs->ss = KDS | KRPL;
	regs->eflags = FLAGS_RESERVED1 | FLAGS_INTERRUPT | FLAGS_ID;
	regs->kerrno = 0;
	regs->signal_pending = 0;
	regs->kernel_stack = stack + stack_size;
	regs->cr3 = process->addrspace;
	// Start from a pristine FPU state (512 bytes = FXSAVE area).
	memcpy(regs->fpuenv, Float::fpu_initialized_regs, 512);
#elif defined(__x86_64__)
	// x86_64 passes the thread parameter in %rdi per the System V ABI, so
	// only the fake call frame goes on the stack.
	uintptr_t* stack_values = (uintptr_t*) (stack + stack_size);

	assert(3 * sizeof(uintptr_t) <= stack_size);

	stack_values[-1] = (uintptr_t) 0; /* null rip */
	stack_values[-2] = (uintptr_t) 0; /* null rbp */
	stack_values[-3] = (uintptr_t) kthread_exit; /* return to kthread_exit */

	regs->rip = (uintptr_t) entry;
	regs->rsp = (uintptr_t) (stack_values - 3);
	regs->rax = 0;
	regs->rbx = 0;
	regs->rcx = 0;
	regs->rdx = 0;
	regs->rdi = (uintptr_t) user;
	regs->rsi = 0;
	regs->rbp = 0;
	regs->r8 = 0;
	regs->r9 = 0;
	regs->r10 = 0;
	regs->r11 = 0;
	regs->r12 = 0;
	regs->r13 = 0;
	regs->r14 = 0;
	regs->r15 = 0;
	regs->cs = KCS | KRPL;
	regs->ds = KDS | KRPL;
	regs->ss = KDS | KRPL;
	regs->rflags = FLAGS_RESERVED1 | FLAGS_INTERRUPT | FLAGS_ID;
	regs->kerrno = 0;
	regs->signal_pending = 0;
	regs->kernel_stack = stack + stack_size;
	regs->cr3 = process->addrspace;
	memcpy(regs->fpuenv, Float::fpu_initialized_regs, 512);
#else
#warning "You need to add kernel thread register initialization support"
#endif
}
|
|
|
|
|
|
|
|
Thread* CreateKernelThread(Process* process, void (*entry)(void*), void* user,
|
2021-02-06 15:23:43 -05:00
|
|
|
const char* name, size_t stacksize)
|
2013-07-27 06:56:40 -04:00
|
|
|
{
|
2013-08-22 10:17:24 -04:00
|
|
|
const size_t DEFAULT_KERNEL_STACK_SIZE = 8 * 1024UL;
|
2013-07-27 06:56:40 -04:00
|
|
|
if ( !stacksize )
|
|
|
|
stacksize = DEFAULT_KERNEL_STACK_SIZE;
|
|
|
|
uint8_t* stack = new uint8_t[stacksize];
|
|
|
|
if ( !stack )
|
|
|
|
return NULL;
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-03-03 18:11:13 -05:00
|
|
|
struct thread_registers regs;
|
2021-02-06 15:23:43 -05:00
|
|
|
SetupKernelThreadRegs(®s, process, entry, user, (uintptr_t) stack,
|
|
|
|
stacksize);
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
Thread* thread = CreateKernelThread(process, ®s, name);
|
2013-07-27 06:56:40 -04:00
|
|
|
if ( !thread ) { delete[] stack; return NULL; }
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-03-02 18:08:01 -05:00
|
|
|
thread->kernelstackpos = (uintptr_t) stack;
|
2013-07-27 06:56:40 -04:00
|
|
|
thread->kernelstacksize = stacksize;
|
|
|
|
thread->kernelstackmalloced = true;
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2013-07-27 06:56:40 -04:00
|
|
|
return thread;
|
|
|
|
}
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
Thread* CreateKernelThread(void (*entry)(void*), void* user, const char* name,
|
|
|
|
size_t stacksize)
|
2013-07-27 06:56:40 -04:00
|
|
|
{
|
2021-02-06 15:23:43 -05:00
|
|
|
return CreateKernelThread(CurrentProcess(), entry, user, name, stacksize);
|
2013-07-27 06:56:40 -04:00
|
|
|
}
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
|
2013-07-27 06:56:40 -04:00
|
|
|
void StartKernelThread(Thread* thread)
|
|
|
|
{
|
|
|
|
Scheduler::SetThreadState(thread, ThreadState::RUNNABLE);
|
|
|
|
}
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
Thread* RunKernelThread(Process* process, struct thread_registers* regs,
|
|
|
|
const char* name)
|
2013-07-27 06:56:40 -04:00
|
|
|
{
|
2021-02-06 15:23:43 -05:00
|
|
|
Thread* thread = CreateKernelThread(process, regs, name);
|
2013-07-27 06:56:40 -04:00
|
|
|
if ( !thread )
|
|
|
|
return NULL;
|
|
|
|
StartKernelThread(thread);
|
|
|
|
return thread;
|
|
|
|
}
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2014-03-02 18:08:01 -05:00
|
|
|
Thread* RunKernelThread(Process* process, void (*entry)(void*), void* user,
|
2021-02-06 15:23:43 -05:00
|
|
|
const char* name, size_t stacksize)
|
2013-07-27 06:56:40 -04:00
|
|
|
{
|
2021-02-06 15:23:43 -05:00
|
|
|
Thread* thread = CreateKernelThread(process, entry, user, name, stacksize);
|
2013-07-27 06:56:40 -04:00
|
|
|
if ( !thread )
|
|
|
|
return NULL;
|
|
|
|
StartKernelThread(thread);
|
|
|
|
return thread;
|
|
|
|
}
|
2012-02-10 07:27:11 -05:00
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
Thread* RunKernelThread(void (*entry)(void*), void* user, const char* name,
|
|
|
|
size_t stacksize)
|
2013-07-27 06:56:40 -04:00
|
|
|
{
|
2021-02-06 15:23:43 -05:00
|
|
|
Thread* thread = CreateKernelThread(entry, user, name, stacksize);
|
2013-07-27 06:56:40 -04:00
|
|
|
if ( !thread )
|
|
|
|
return NULL;
|
|
|
|
StartKernelThread(thread);
|
|
|
|
return thread;
|
|
|
|
}
|
2011-09-15 16:38:40 -04:00
|
|
|
|
2014-10-16 18:04:47 -04:00
|
|
|
int sys_exit_thread(int requested_exit_code,
|
|
|
|
int flags,
|
|
|
|
const struct exit_thread* user_extended)
|
2013-08-31 13:35:17 -04:00
|
|
|
{
|
|
|
|
if ( flags & ~(EXIT_THREAD_ONLY_IF_OTHERS |
|
|
|
|
EXIT_THREAD_UNMAP |
|
2013-08-04 14:24:59 -04:00
|
|
|
EXIT_THREAD_ZERO |
|
|
|
|
EXIT_THREAD_TLS_UNMAP |
|
|
|
|
EXIT_THREAD_PROCESS |
|
2021-02-06 15:23:43 -05:00
|
|
|
EXIT_THREAD_DUMP_CORE |
|
|
|
|
EXIT_THREAD_FUTEX_WAKE) )
|
2013-08-04 14:24:59 -04:00
|
|
|
return errno = EINVAL, -1;
|
|
|
|
|
|
|
|
if ( (flags & EXIT_THREAD_ONLY_IF_OTHERS) && (flags & EXIT_THREAD_PROCESS) )
|
2013-08-31 13:35:17 -04:00
|
|
|
return errno = EINVAL, -1;
|
|
|
|
|
|
|
|
Thread* thread = CurrentThread();
|
|
|
|
Process* process = CurrentProcess();
|
|
|
|
|
|
|
|
struct exit_thread extended;
|
|
|
|
if ( !user_extended )
|
|
|
|
memset(&extended, 0, sizeof(extended));
|
|
|
|
else if ( !CopyFromUser(&extended, user_extended, sizeof(extended)) )
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
extended.unmap_size = Page::AlignUp(extended.unmap_size);
|
|
|
|
|
|
|
|
kthread_mutex_lock(&thread->process->threadlock);
|
|
|
|
bool is_others = false;
|
|
|
|
for ( Thread* iter = thread->process->firstthread;
|
|
|
|
!is_others && iter;
|
|
|
|
iter = iter->nextsibling )
|
|
|
|
{
|
|
|
|
if ( iter == thread )
|
|
|
|
continue;
|
2013-08-04 14:24:59 -04:00
|
|
|
if ( iter->pledged_destruction )
|
|
|
|
continue;
|
2013-08-31 13:35:17 -04:00
|
|
|
is_others = true;
|
|
|
|
}
|
2013-08-04 14:24:59 -04:00
|
|
|
if ( !(flags & EXIT_THREAD_ONLY_IF_OTHERS) || is_others )
|
|
|
|
thread->pledged_destruction = true;
|
|
|
|
bool are_threads_exiting = false;
|
2016-10-09 16:56:30 -04:00
|
|
|
bool do_exit = (flags & EXIT_THREAD_PROCESS) || !is_others;
|
|
|
|
if ( do_exit )
|
2013-08-04 14:24:59 -04:00
|
|
|
process->threads_exiting = true;
|
|
|
|
else if ( process->threads_exiting )
|
|
|
|
are_threads_exiting = true;
|
2013-08-31 13:35:17 -04:00
|
|
|
kthread_mutex_unlock(&thread->process->threadlock);
|
|
|
|
|
2013-08-04 14:24:59 -04:00
|
|
|
// Self-destruct if another thread began exiting the process.
|
|
|
|
if ( are_threads_exiting )
|
|
|
|
kthread_exit();
|
|
|
|
|
2013-08-31 13:35:17 -04:00
|
|
|
if ( (flags & EXIT_THREAD_ONLY_IF_OTHERS) && !is_others )
|
|
|
|
return errno = ESRCH, -1;
|
|
|
|
|
|
|
|
if ( flags & EXIT_THREAD_UNMAP &&
|
|
|
|
Page::IsAligned((uintptr_t) extended.unmap_from) &&
|
|
|
|
extended.unmap_size )
|
|
|
|
{
|
|
|
|
ScopedLock lock(&process->segment_lock);
|
2015-02-16 08:22:45 -05:00
|
|
|
extended.unmap_size = Page::AlignDown(extended.unmap_size);
|
2013-08-31 13:35:17 -04:00
|
|
|
Memory::UnmapMemory(process, (uintptr_t) extended.unmap_from,
|
|
|
|
extended.unmap_size);
|
2014-06-24 18:42:25 -04:00
|
|
|
Memory::Flush();
|
2013-08-04 14:24:59 -04:00
|
|
|
// TODO: The segment is not actually removed!
|
|
|
|
}
|
|
|
|
|
|
|
|
if ( flags & EXIT_THREAD_TLS_UNMAP &&
|
|
|
|
Page::IsAligned((uintptr_t) extended.tls_unmap_from) &&
|
|
|
|
extended.tls_unmap_size )
|
|
|
|
{
|
|
|
|
ScopedLock lock(&process->segment_lock);
|
2015-02-16 08:22:45 -05:00
|
|
|
extended.tls_unmap_size = Page::AlignDown(extended.tls_unmap_size);
|
2013-08-04 14:24:59 -04:00
|
|
|
Memory::UnmapMemory(process, (uintptr_t) extended.tls_unmap_from,
|
|
|
|
extended.tls_unmap_size);
|
|
|
|
Memory::Flush();
|
2013-08-31 13:35:17 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if ( flags & EXIT_THREAD_ZERO )
|
|
|
|
ZeroUser(extended.zero_from, extended.zero_size);
|
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
if ( flags & EXIT_THREAD_FUTEX_WAKE )
|
|
|
|
sys_futex((int*) extended.zero_from, FUTEX_WAKE, 1, NULL);
|
|
|
|
|
2016-10-09 16:56:30 -04:00
|
|
|
if ( do_exit )
|
2013-08-04 14:24:59 -04:00
|
|
|
{
|
|
|
|
// Validate the requested exit code such that the process can't exit
|
|
|
|
// with an impossible exit status or that it wasn't actually terminated.
|
|
|
|
|
|
|
|
int the_nature = WNATURE(requested_exit_code);
|
|
|
|
int the_status = WEXITSTATUS(requested_exit_code);
|
|
|
|
int the_signal = WTERMSIG(requested_exit_code);
|
|
|
|
|
|
|
|
if ( the_nature == WNATURE_EXITED )
|
|
|
|
the_signal = 0;
|
|
|
|
else if ( the_nature == WNATURE_SIGNALED )
|
|
|
|
{
|
|
|
|
if ( the_signal == 0 /* null signal */ ||
|
|
|
|
the_signal == SIGSTOP ||
|
|
|
|
the_signal == SIGTSTP ||
|
|
|
|
the_signal == SIGTTIN ||
|
|
|
|
the_signal == SIGTTOU ||
|
|
|
|
the_signal == SIGCONT )
|
|
|
|
the_signal = SIGKILL;
|
|
|
|
the_status = 128 + the_signal;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
the_nature = WNATURE_SIGNALED;
|
|
|
|
the_signal = SIGKILL;
|
|
|
|
}
|
|
|
|
|
|
|
|
requested_exit_code = WCONSTRUCT(the_nature, the_status, the_signal);
|
|
|
|
|
|
|
|
thread->process->ExitWithCode(requested_exit_code);
|
|
|
|
}
|
2013-08-31 13:35:17 -04:00
|
|
|
|
|
|
|
kthread_exit();
|
|
|
|
}
|
|
|
|
|
2021-02-06 15:23:43 -05:00
|
|
|
static void futex_timeout(Clock* /*clock*/, Timer* /*timer*/, void* ctx)
|
|
|
|
{
|
|
|
|
Thread* thread = (Thread*) ctx;
|
|
|
|
thread->timer_woken = true;
|
|
|
|
kthread_wake_futex(thread);
|
|
|
|
}
|
|
|
|
|
|
|
|
int sys_futex(int* user_address,
|
|
|
|
int op,
|
|
|
|
int value,
|
|
|
|
const struct timespec* user_timeout)
|
|
|
|
{
|
|
|
|
ioctx_t ctx; SetupKernelIOCtx(&ctx);
|
|
|
|
Thread* thread = CurrentThread();
|
|
|
|
Process* process = thread->process;
|
|
|
|
if ( FUTEX_GET_OP(op) == FUTEX_WAIT )
|
|
|
|
{
|
|
|
|
kthread_mutex_lock(&process->futex_lock);
|
|
|
|
thread->futex_address = (uintptr_t) user_address;
|
|
|
|
thread->futex_woken = false;
|
|
|
|
thread->futex_prev_waiting = process->futex_last_waiting;
|
|
|
|
thread->futex_next_waiting = NULL;
|
|
|
|
(process->futex_last_waiting ?
|
|
|
|
process->futex_last_waiting->futex_next_waiting :
|
|
|
|
process->futex_first_waiting) = thread;
|
|
|
|
process->futex_last_waiting = thread;
|
|
|
|
kthread_mutex_unlock(&process->futex_lock);
|
|
|
|
thread->timer_woken = false;
|
|
|
|
Timer timer;
|
|
|
|
if ( user_timeout )
|
|
|
|
{
|
|
|
|
clockid_t clockid = FUTEX_GET_CLOCK(op);
|
|
|
|
bool absolute = op & FUTEX_ABSOLUTE;
|
|
|
|
struct timespec timeout;
|
|
|
|
if ( !CopyFromUser(&timeout, user_timeout, sizeof(timeout)) )
|
|
|
|
return -1;
|
|
|
|
if ( !timespec_is_canonical(timeout) )
|
|
|
|
return errno = EINVAL, -1;
|
|
|
|
Clock* clock = Time::GetClock(clockid);
|
|
|
|
timer.Attach(clock);
|
|
|
|
struct itimerspec timerspec;
|
|
|
|
timerspec.it_value = timeout;
|
|
|
|
timerspec.it_interval.tv_sec = 0;
|
|
|
|
timerspec.it_interval.tv_nsec = 0;
|
|
|
|
int timer_flags = (absolute ? TIMER_ABSOLUTE : 0) |
|
|
|
|
TIMER_FUNC_INTERRUPT_HANDLER;
|
|
|
|
timer.Set(&timerspec, NULL, timer_flags, futex_timeout, thread);
|
|
|
|
}
|
|
|
|
int result = 0;
|
|
|
|
int current;
|
|
|
|
if ( !ReadAtomicFromUser(¤t, user_address) )
|
|
|
|
result = -1;
|
|
|
|
else if ( current != value )
|
|
|
|
{
|
|
|
|
errno = EAGAIN;
|
|
|
|
result = -1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
kthread_wait_futex_signal();
|
|
|
|
if ( user_timeout )
|
|
|
|
timer.Cancel();
|
|
|
|
kthread_mutex_lock(&process->futex_lock);
|
|
|
|
if ( result == 0 && !thread->futex_woken )
|
|
|
|
{
|
|
|
|
if ( Signal::IsPending() )
|
|
|
|
{
|
|
|
|
errno = EINTR;
|
|
|
|
result = -1;
|
|
|
|
}
|
|
|
|
else if ( thread->timer_woken )
|
|
|
|
{
|
|
|
|
errno = ETIMEDOUT;
|
|
|
|
result = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
thread->futex_address = 0;
|
|
|
|
thread->futex_woken = false;
|
|
|
|
(thread->futex_prev_waiting ?
|
|
|
|
thread->futex_prev_waiting->futex_next_waiting :
|
|
|
|
process->futex_first_waiting) = thread->futex_next_waiting;
|
|
|
|
(thread->futex_next_waiting ?
|
|
|
|
thread->futex_next_waiting->futex_prev_waiting :
|
|
|
|
process->futex_last_waiting) = thread->futex_prev_waiting;
|
|
|
|
thread->futex_prev_waiting = NULL;
|
|
|
|
thread->futex_next_waiting = NULL;
|
|
|
|
kthread_mutex_unlock(&process->futex_lock);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else if ( FUTEX_GET_OP(op) == FUTEX_WAKE )
|
|
|
|
{
|
|
|
|
kthread_mutex_lock(&process->futex_lock);
|
|
|
|
int result = 0;
|
|
|
|
for ( Thread* waiter = process->futex_first_waiting;
|
|
|
|
0 < value && waiter;
|
|
|
|
waiter = waiter->futex_next_waiting )
|
|
|
|
{
|
|
|
|
if ( waiter->futex_address == (uintptr_t) user_address )
|
|
|
|
{
|
|
|
|
waiter->futex_woken = true;
|
|
|
|
kthread_wake_futex(waiter);
|
|
|
|
if ( value != INT_MAX )
|
|
|
|
value--;
|
|
|
|
if ( result != INT_MAX )
|
|
|
|
result++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
kthread_mutex_unlock(&process->futex_lock);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return errno = EINVAL, -1;
|
|
|
|
}
|
|
|
|
|
2013-07-27 06:56:40 -04:00
|
|
|
} // namespace Sortix
|