/*
 * Copyright (c) 2011, 2012, 2013, 2014, 2015, 2016 Jonas 'Sortie' Termansen.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * process.cpp
 * A named collection of threads.
 */

#include <sys/wait.h>

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <msr.h>
#include <scram.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <sortix/clock.h>
#include <sortix/fcntl.h>
#include <sortix/fork.h>
#include <sortix/mman.h>
#include <sortix/resource.h>
#include <sortix/signal.h>
#include <sortix/stat.h>
#include <sortix/unistd.h>
#include <sortix/uthread.h>
#include <sortix/wait.h>

#include <sortix/kernel/addralloc.h>
#include <sortix/kernel/copy.h>
#include <sortix/kernel/descriptor.h>
#include <sortix/kernel/dtable.h>
#include <sortix/kernel/elf.h>
#include <sortix/kernel/ioctx.h>
#include <sortix/kernel/kernel.h>
#include <sortix/kernel/kthread.h>
#include <sortix/kernel/memorymanagement.h>
#include <sortix/kernel/mtable.h>
#include <sortix/kernel/process.h>
#include <sortix/kernel/ptable.h>
#include <sortix/kernel/refcount.h>
#include <sortix/kernel/scheduler.h>
#include <sortix/kernel/sortedlist.h>
#include <sortix/kernel/string.h>
#include <sortix/kernel/syscall.h>
#include <sortix/kernel/thread.h>
#include <sortix/kernel/time.h>
#include <sortix/kernel/worker.h>

#if defined(__i386__) || defined(__x86_64__)
#include "x86-family/float.h"
#include "x86-family/gdt.h"
#endif

namespace Sortix {
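// Serializes access to the process family state: the parent/child links,
// process group membership, and session membership. Process::~Process runs
// with this mutex held (see the destructor below).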
kthread_mutex_t process_family_lock = KTHREAD_MUTEX_INITIALIZER;
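// Construct a process in a blank, resourceless state: no address space, no
// threads, and null references. The descriptor/mount tables and the root and
// working directories are attached afterwards via BootstrapTables() and
// BootstrapDirectories().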
Process::Process()
{
	program_image_path = NULL;
	addrspace = 0;
	pid = 0;

	nicelock = KTHREAD_MUTEX_INITIALIZER;
	nice = 0;

	idlock = KTHREAD_MUTEX_INITIALIZER;
	uid = euid = 0;
	gid = egid = 0;
	umask = 0022;

	ptrlock = KTHREAD_MUTEX_INITIALIZER;
	// tty set to null reference in the member constructor.
	// root set to null reference in the member constructor.
	// cwd set to null reference in the member constructor.
	// mtable set to null reference in the member constructor.
	// dtable set to null reference in the member constructor.

	// ptable set to null reference in the member constructor.

	resource_limits_lock = KTHREAD_MUTEX_INITIALIZER;
	for ( size_t i = 0; i < RLIMIT_NUM_DECLARED; i++ )
	{
		resource_limits[i].rlim_cur = RLIM_INFINITY;
		resource_limits[i].rlim_max = RLIM_INFINITY;
	}

	signal_lock = KTHREAD_MUTEX_INITIALIZER;
	memset(&signal_actions, 0, sizeof(signal_actions));
	for ( int i = 0; i < SIG_MAX_NUM; i++ )
	{
		sigemptyset(&signal_actions[i].sa_mask);
		signal_actions[i].sa_handler = SIG_DFL;
		signal_actions[i].sa_cookie = NULL;
		signal_actions[i].sa_flags = 0;
	}
	sigemptyset(&signal_pending);
	sigreturn = NULL;

	parent = NULL;
	prevsibling = NULL;
	nextsibling = NULL;
	firstchild = NULL;
	zombiechild = NULL;
	group = NULL;
	groupprev = NULL;
	groupnext = NULL;
	groupfirst = NULL;
	session = NULL;
	sessionprev = NULL;
	sessionnext = NULL;
	sessionfirst = NULL;
	zombiecond = KTHREAD_COND_INITIALIZER;
	iszombie = false;
	nozombify = false;
	limbo = false;
	exit_code = -1;

	firstthread = NULL;
	threadlock = KTHREAD_MUTEX_INITIALIZER;
	threads_exiting = false;

	segments = NULL;
	segments_used = 0;
	segments_length = 0;
	segment_write_lock = KTHREAD_MUTEX_INITIALIZER;
	segment_lock = KTHREAD_MUTEX_INITIALIZER;

	user_timers_lock = KTHREAD_MUTEX_INITIALIZER;
	memset(&user_timers, 0, sizeof(user_timers));
	// alarm_timer initialized in member constructor.
	// execute_clock initialized in member constructor.
	// system_clock initialized in member constructor.
	// child_execute_clock initialized in member constructor.
	// child_system_clock initialized in member constructor.
	Time::InitializeProcessClocks(this);
	alarm_timer.Attach(Time::GetClock(CLOCK_MONOTONIC));
}
Process::~Process() // process_family_lock taken
{
	if ( alarm_timer.IsAttached() )
		alarm_timer.Detach();
	delete[] program_image_path;
	assert(!zombiechild);
	assert(!session);
	assert(!group);
	assert(!parent);
	assert(!sessionfirst);
	assert(!groupfirst);
	assert(!firstchild);
	assert(!addrspace);
	assert(!segments);
	assert(!dtable);
	assert(!mtable);
	assert(!cwd);
	assert(!root);

	assert(ptable);
	ptable->Free(pid);
	ptable.Reset();
	tty.Reset();
}
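// Attach the initial descriptor table and mount table to a freshly
// constructed process; both must still be unset.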
void Process::BootstrapTables(Ref<DescriptorTable> dtable, Ref<MountTable> mtable)
{
	ScopedLock lock(&ptrlock);
	assert(!this->dtable);
	assert(!this->mtable);
	this->dtable = dtable;
	this->mtable = mtable;
}
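// Attach the initial root directory and make it the working directory as
// well; both must still be unset.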
void Process::BootstrapDirectories(Ref<Descriptor> root)
{
	ScopedLock lock(&ptrlock);
	assert(!this->root);
	assert(!this->cwd);
	this->root = root;
	this->cwd = root;
}
void Process__OnLastThreadExit(void* user);
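// Unlink a dying thread from this process's thread list. When the last
// thread is gone, the process schedules its own death on a worker thread.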
void Process::OnThreadDestruction(Thread* thread)
{
	assert(thread->process == this);
	kthread_mutex_lock(&threadlock);
	if ( thread->prevsibling )
		thread->prevsibling->nextsibling = thread->nextsibling;
	if ( thread->nextsibling )
		thread->nextsibling->prevsibling = thread->prevsibling;
	if ( thread == firstthread )
		firstthread = thread->nextsibling;
	if ( firstthread )
		firstthread->prevsibling = NULL;
	thread->prevsibling = thread->nextsibling = NULL;
	bool threadsleft = firstthread;
	kthread_mutex_unlock(&threadlock);

	// We are called from the thread's destructor, so let it finish before
	// we handle the situation by killing ourselves.
	if ( !threadsleft )
		ScheduleDeath();
}
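// Queue the final process teardown on a kernel worker thread; every thread
// must already have exited.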
void Process::ScheduleDeath()
{
	// All our threads must have exited at this point.
	assert(!firstthread);
	Worker::Schedule(Process__OnLastThreadExit, this);
|
2013-05-11 19:24:42 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
// Useful for killing a partially constructed process without waiting for
// it to die and garbage collect its zombie. It is not safe to access this
// process after this call as another thread may garbage collect it.
void Process::AbortConstruction()
{
	nozombify = true;
	ScheduleDeath();
}
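// Worker thread trampoline that forwards to Process::OnLastThreadExit().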
void Process__OnLastThreadExit(void* user)
{
	return ((Process*) user)->OnLastThreadExit();
}
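// Runs on the worker thread once the last thread has exited; LastPrayer()
// performs the final cleanup of the process.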
void Process::OnLastThreadExit()
{
	LastPrayer();
}
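// Cancel and detach any per-process timers that are still attached.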
void Process::DeleteTimers()
{
	for ( timer_t i = 0; i < PROCESS_TIMER_NUM_MAX; i++ )
	{
		if ( user_timers[i].timer.IsAttached() )
		{
			user_timers[i].timer.Cancel();
			user_timers[i].timer.Detach();
		}
	}
}

void Process::LastPrayer()
{
	assert(this);
	// This must never be called twice.
	assert(!iszombie);
	// This must be called from a thread using another address space as the
	// address space of this process is about to be destroyed.
	Thread* curthread = CurrentThread();
	assert(curthread->process != this);
	// This can't be called if the process is still alive.
	assert(!firstthread);

	// Disarm and detach all the timers in the process.
	DeleteTimers();
	if ( alarm_timer.IsAttached() )
	{
		alarm_timer.Cancel();
		alarm_timer.Detach();
	}

	// We need to temporarily reload the correct address space of the dying
	// process such that we can unmap and free its memory.
	addr_t prevaddrspace = Memory::SwitchAddressSpace(addrspace);

	ResetAddressSpace();

	// tty is kept alive in session leader until no longer in limbo.
	if ( dtable ) dtable.Reset();
	if ( cwd ) cwd.Reset();
	if ( root ) root.Reset();
	if ( mtable ) mtable.Reset();

	// Destroy the address space and safely switch to the replacement
	// address space before things get dangerous.
	Memory::DestroyAddressSpace(prevaddrspace);
	addrspace = 0;

	ScopedLock family_lock(&process_family_lock);

	iszombie = true;

	// Init is nice and will gladly raise our orphaned children and zombies.
	Process* init = Scheduler::GetInitProcess();
	assert(init);
	while ( firstchild )
	{
		Process* process = firstchild;
		firstchild = process->nextsibling;
		process->parent = init;
		process->prevsibling = NULL;
		process->nextsibling = init->firstchild;
		if ( init->firstchild )
			init->firstchild->prevsibling = process;
		init->firstchild = process;
		process->nozombify = true;
	}

	// Since we have no more children (they are with init now), we don't
	// have to worry about new zombie processes showing up, so just collect
	// those that are left. Then we satisfy the invariant !zombiechild that
	// applies on process termination.
	while ( zombiechild )
	{
		Process* zombie = zombiechild;
		zombiechild = zombie->nextsibling;
		zombie->nextsibling = NULL;
		if ( zombiechild )
			zombiechild->prevsibling = NULL;
		zombie->nozombify = true;
		zombie->WaitedFor();
	}

	// Remove ourselves from our process group.
	if ( group )
		group->GroupRemoveMember(this);
	// Remove ourselves from our session.
	if ( session )
		session->SessionRemoveMember(this);

	bool zombify = !nozombify;
	// This class instance will be destroyed by our parent process when it
	// has received and acknowledged our death.
	if ( parent )
		parent->NotifyChildExit(this, zombify);
2012-08-01 11:30:34 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
// If nobody is waiting for us, then simply commit suicide.
|
|
|
|
if ( !zombify )
|
2015-07-31 15:59:53 -04:00
|
|
|
WaitedFor();
|
|
|
|
}
|
2013-06-11 19:35:31 -04:00
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
void Process::WaitedFor() // process_family_lock taken
|
2015-07-31 15:59:53 -04:00
|
|
|
{
|
|
|
|
parent = NULL;
|
2016-11-15 15:08:41 -05:00
|
|
|
limbo = false;
|
|
|
|
if ( groupfirst || sessionfirst )
|
|
|
|
limbo = true;
|
|
|
|
if ( !limbo )
|
2015-07-31 15:59:53 -04:00
|
|
|
delete this;
|
2013-06-11 19:01:49 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
void Process::ResetAddressSpace()
|
|
|
|
{
|
2015-05-14 09:19:23 -04:00
|
|
|
ScopedLock lock1(&segment_write_lock);
|
|
|
|
ScopedLock lock2(&segment_lock);
|
2013-08-19 20:23:53 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
assert(Memory::GetAddressSpace() == addrspace);
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2013-08-19 20:23:53 -04:00
|
|
|
for ( size_t i = 0; i < segments_used; i++ )
|
2014-05-15 13:55:23 -04:00
|
|
|
Memory::UnmapRange(segments[i].addr, segments[i].size, PAGE_USAGE_USER_SPACE);
|
2013-08-19 20:23:53 -04:00
|
|
|
|
|
|
|
Memory::Flush();
|
|
|
|
|
|
|
|
segments_used = segments_length = 0;
|
|
|
|
free(segments);
|
2013-06-11 19:01:49 -04:00
|
|
|
segments = NULL;
|
|
|
|
}
|
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
void Process::GroupRemoveMember(Process* child) // process_family_lock taken
|
2013-06-11 19:35:31 -04:00
|
|
|
{
|
|
|
|
assert(child->group == this);
|
|
|
|
if ( child->groupprev )
|
|
|
|
child->groupprev->groupnext = child->groupnext;
|
|
|
|
else
|
|
|
|
groupfirst = child->groupnext;
|
|
|
|
if ( child->groupnext )
|
|
|
|
child->groupnext->groupprev = child->groupprev;
|
|
|
|
child->group = NULL;
|
2016-11-15 15:08:41 -05:00
|
|
|
if ( IsLimboDone() )
|
|
|
|
delete this;
|
|
|
|
}
|
2013-06-11 19:35:31 -04:00
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
void Process::SessionRemoveMember(Process* child) // process_family_lock taken
|
|
|
|
{
|
|
|
|
assert(child->session == this);
|
|
|
|
if ( child->sessionprev )
|
|
|
|
child->sessionprev->sessionnext = child->sessionnext;
|
|
|
|
else
|
|
|
|
sessionfirst = child->sessionnext;
|
|
|
|
if ( child->sessionnext )
|
|
|
|
child->sessionnext->sessionprev = child->sessionprev;
|
|
|
|
child->session = NULL;
|
|
|
|
if ( !sessionfirst )
|
|
|
|
{
|
|
|
|
// Remove reference to tty when session is empty.
|
|
|
|
ScopedLock lock(&ptrlock);
|
|
|
|
tty.Reset();
|
|
|
|
}
|
|
|
|
if ( IsLimboDone() )
|
|
|
|
delete this;
|
2013-06-11 19:35:31 -04:00
|
|
|
}
|
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
bool Process::IsLimboDone() // process_family_lock taken
|
2013-06-11 19:35:31 -04:00
|
|
|
{
|
2016-11-15 15:08:41 -05:00
|
|
|
return limbo && !groupfirst && !sessionfirst;
|
2013-06-11 19:35:31 -04:00
|
|
|
}
|
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
// process_family_lock taken
|
2013-06-11 19:01:49 -04:00
|
|
|
void Process::NotifyChildExit(Process* child, bool zombify)
|
|
|
|
{
|
|
|
|
if ( child->prevsibling )
|
|
|
|
child->prevsibling->nextsibling = child->nextsibling;
|
|
|
|
if ( child->nextsibling )
|
|
|
|
child->nextsibling->prevsibling = child->prevsibling;
|
|
|
|
if ( firstchild == child )
|
|
|
|
firstchild = child->nextsibling;
|
|
|
|
if ( firstchild )
|
|
|
|
firstchild->prevsibling = NULL;
|
|
|
|
|
|
|
|
if ( zombify )
|
|
|
|
{
|
2012-08-01 11:30:34 -04:00
|
|
|
if ( zombiechild )
|
2013-06-11 19:01:49 -04:00
|
|
|
zombiechild->prevsibling = child;
|
|
|
|
child->prevsibling = NULL;
|
|
|
|
child->nextsibling = zombiechild;
|
|
|
|
zombiechild = child;
|
|
|
|
}
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
if ( zombify )
|
2016-11-15 15:08:41 -05:00
|
|
|
{
|
|
|
|
DeliverSignal(SIGCHLD);
|
|
|
|
kthread_cond_broadcast(&zombiecond);
|
|
|
|
}
|
2013-06-11 19:01:49 -04:00
|
|
|
}
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-10-01 13:17:49 -04:00
|
|
|
pid_t Process::Wait(pid_t thepid, int* status_ptr, int options)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
|
|
|
// TODO: Process groups are not supported yet.
|
2014-10-01 13:17:49 -04:00
|
|
|
if ( thepid < -1 || thepid == 0 )
|
|
|
|
return errno = ENOSYS, -1;
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
ScopedLock lock(&process_family_lock);
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
// A process can only wait if it has children.
|
|
|
|
if ( !firstchild && !zombiechild )
|
|
|
|
return errno = ECHILD, -1;
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
// Processes can only wait for their own children to exit.
|
|
|
|
if ( 0 < thepid )
|
2012-08-01 11:30:34 -04:00
|
|
|
{
|
2013-06-11 19:01:49 -04:00
|
|
|
// TODO: This is a slow but multithread safe way to verify that the
|
|
|
|
// target process has the correct parent.
|
|
|
|
bool found = false;
|
|
|
|
for ( Process* p = firstchild; !found && p; p = p->nextsibling )
|
2015-07-31 15:59:53 -04:00
|
|
|
if ( p->pid == thepid && !p->nozombify )
|
2013-06-11 19:01:49 -04:00
|
|
|
found = true;
|
|
|
|
for ( Process* p = zombiechild; !found && p; p = p->nextsibling )
|
2015-07-31 15:59:53 -04:00
|
|
|
if ( p->pid == thepid && !p->nozombify )
|
2013-06-11 19:01:49 -04:00
|
|
|
found = true;
|
|
|
|
if ( !found )
|
|
|
|
return errno = ECHILD, -1;
|
2012-08-01 11:30:34 -04:00
|
|
|
}
|
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
Process* zombie = NULL;
|
|
|
|
while ( !zombie )
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remain some
parts of the kernel that are _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tend to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the introduction of race conditions and dead locks have forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel was quite hacky, I had to refactor such code. So it seems
that the risk of dead locks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean up of
the filesystem code that current is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ api based on the design
of pthreads; this library allows easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code have been rewritten from scratch and has become a lot more
stable and correct.
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
    {
        for ( zombie = zombiechild; zombie; zombie = zombie->nextsibling )
            if ( (thepid == -1 || thepid == zombie->pid) && !zombie->nozombify )
                break;
        if ( zombie )
            break;
        if ( options & WNOHANG )
            return 0;
        if ( !kthread_cond_wait_signal(&zombiecond, &process_family_lock) )
            return errno = EINTR, -1;
    }

    // Remove from the list of zombies.
    if ( zombie->prevsibling )
        zombie->prevsibling->nextsibling = zombie->nextsibling;
    if ( zombie->nextsibling )
        zombie->nextsibling->prevsibling = zombie->prevsibling;
    if ( zombiechild == zombie )
        zombiechild = zombie->nextsibling;
    if ( zombiechild )
        zombiechild->prevsibling = NULL;

    thepid = zombie->pid;

    // It is safe to access these clocks directly as the child process is no
    // longer running at this point and the values are nicely frozen.
    child_execute_clock.Advance(zombie->child_execute_clock.current_time);
    child_system_clock.Advance(zombie->child_system_clock.current_time);

    int status = zombie->exit_code;
    if ( status < 0 )
        status = WCONSTRUCT(WNATURE_SIGNALED, 128 + SIGKILL, SIGKILL);

    zombie->WaitedFor();

    if ( status_ptr )
        *status_ptr = status;

    return thepid;
}

pid_t sys_waitpid(pid_t pid, int* user_status, int options)
{
    int status = 0;
    pid_t ret = CurrentProcess()->Wait(pid, &status, options);
    if ( 0 < ret && !CopyToUser(user_status, &status, sizeof(status)) )
        return -1;
    return ret;
}
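
The status word built in Wait() above is what user space later unpacks with the standard <sys/wait.h> macros. A minimal user-space sketch of consuming it (ordinary POSIX usage, not part of this kernel file; the child pid is assumed to come from an earlier fork):

#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>

// Reap one child and report how it terminated.
static void report_child(pid_t child)
{
    int status;
    if ( waitpid(child, &status, 0) < 0 )
        return;
    if ( WIFEXITED(status) )
        printf("exited with code %i\n", WEXITSTATUS(status));
    else if ( WIFSIGNALED(status) )
        printf("killed by signal %i\n", WTERMSIG(status));
}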

void Process::ExitThroughSignal(int signal)
{
    ExitWithCode(WCONSTRUCT(WNATURE_SIGNALED, 128 + signal, signal));
}

void Process::ExitWithCode(int requested_exit_code)
{
    ScopedLock lock(&threadlock);
    if ( exit_code == -1 )
        exit_code = requested_exit_code;

    // Broadcast SIGKILL to all our threads which will begin our long path
    // of process termination. We simply can't stop the threads as they may
    // be running in kernel mode doing dangerous stuff. This thread will be
    // destroyed by SIGKILL once the system call returns.
    for ( Thread* t = firstthread; t; t = t->nextsibling )
        t->DeliverSignal(SIGKILL);
}
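
ExitWithCode() and the accessors that follow all rely on ScopedLock, the RAII guard from the kthread_ API: the mutex is taken in the constructor and released when the guard goes out of scope, so every early return unlocks correctly. A minimal sketch of the idiom (illustrative only; the real class ships with the kthread headers, and the mutex type name kthread_mutex_t is an assumption based on the lock/unlock calls used in this file):

// Illustrative sketch of a scope-bound mutex guard, not the real ScopedLock.
class ScopedLockSketch
{
public:
    explicit ScopedLockSketch(kthread_mutex_t* mutex) : mutex(mutex)
    {
        kthread_mutex_lock(mutex);
    }
    ~ScopedLockSketch()
    {
        kthread_mutex_unlock(mutex);
    }
private:
    kthread_mutex_t* mutex;
};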

Ref<MountTable> Process::GetMTable()
{
    ScopedLock lock(&ptrlock);
    assert(mtable);
    return mtable;
}

Ref<DescriptorTable> Process::GetDTable()
{
    ScopedLock lock(&ptrlock);
    assert(dtable);
    return dtable;
}

Ref<ProcessTable> Process::GetPTable()
{
    ScopedLock lock(&ptrlock);
    assert(ptable);
    return ptable;
}

Ref<Descriptor> Process::GetTTY()
{
    ScopedLock lock(&ptrlock);
    return tty;
}

Ref<Descriptor> Process::GetRoot()
{
    ScopedLock lock(&ptrlock);
    assert(root);
    return root;
}

Ref<Descriptor> Process::GetCWD()
{
    ScopedLock lock(&ptrlock);
    assert(cwd);
    return cwd;
}

void Process::SetTTY(Ref<Descriptor> newtty)
{
    ScopedLock lock(&ptrlock);
    tty = newtty;
}

void Process::SetRoot(Ref<Descriptor> newroot)
{
    ScopedLock lock(&ptrlock);
    assert(newroot);
    root = newroot;
}

void Process::SetCWD(Ref<Descriptor> newcwd)
{
    ScopedLock lock(&ptrlock);
    assert(newcwd);
    cwd = newcwd;
}

Ref<Descriptor> Process::GetDescriptor(int fd)
{
    ScopedLock lock(&ptrlock);
    assert(dtable);
    return dtable->Get(fd);
}

Process* Process::Fork()
{
    assert(CurrentProcess() == this);

    Process* clone = new Process;
    if ( !clone )
        return NULL;

    struct segment* clone_segments = NULL;

    // Fork the segment list.
    if ( segments )
    {
        size_t segments_size = sizeof(struct segment) * segments_used;
        if ( !(clone_segments = (struct segment*) malloc(segments_size)) )
        {
            delete clone;
            return NULL;
        }
        memcpy(clone_segments, segments, segments_size);
    }

    // Fork address-space here and copy memory.
    clone->addrspace = Memory::Fork();
    if ( !clone->addrspace )
    {
        free(clone_segments);
        delete clone;
        return NULL;
    }

    // Now it's too late to clean up here, if anything goes wrong, we simply
    // ask the process to commit suicide before it goes live.
    clone->segments = clone_segments;
    clone->segments_used = segments_used;
    clone->segments_length = segments_used;

    kthread_mutex_lock(&process_family_lock);

    if ( (clone->pid = (clone->ptable = ptable)->Allocate(clone)) < 0 )
    {
        kthread_mutex_unlock(&process_family_lock);
        clone->AbortConstruction();
        return NULL;
    }

    // Remember the relation to the child process.
    clone->parent = this;
    clone->nextsibling = firstchild;
    clone->prevsibling = NULL;
    if ( firstchild )
        firstchild->prevsibling = clone;
    firstchild = clone;

    // Add the new process to the current process group.
    clone->group = group;
    clone->groupprev = NULL;
    if ( (clone->groupnext = group->groupfirst) )
        group->groupfirst->groupprev = clone;
    group->groupfirst = clone;

    // Add the new process to the current session.
    clone->session = session;
    clone->sessionprev = NULL;
    if ( (clone->sessionnext = session->sessionfirst) )
        session->sessionfirst->sessionprev = clone;
    session->sessionfirst = clone;

    kthread_mutex_unlock(&process_family_lock);

    // Initialize everything that is safe and can't fail.
    kthread_mutex_lock(&resource_limits_lock);
    for ( size_t i = 0; i < RLIMIT_NUM_DECLARED; i++ )
        clone->resource_limits[i] = resource_limits[i];
    kthread_mutex_unlock(&resource_limits_lock);

    kthread_mutex_lock(&nicelock);
    clone->nice = nice;
    kthread_mutex_unlock(&nicelock);

    kthread_mutex_lock(&ptrlock);
    clone->root = root;
    clone->cwd = cwd;
    kthread_mutex_unlock(&ptrlock);

    kthread_mutex_lock(&idlock);
    clone->uid = uid;
    clone->gid = gid;
    clone->euid = euid;
    clone->egid = egid;
    clone->umask = umask;
    kthread_mutex_unlock(&idlock);

    kthread_mutex_lock(&signal_lock);
    memcpy(&clone->signal_actions, &signal_actions, sizeof(signal_actions));
    sigemptyset(&clone->signal_pending);
    clone->sigreturn = sigreturn;
    kthread_mutex_unlock(&signal_lock);

    // Initialize things that can fail and abort if needed.
    bool failure = false;

    kthread_mutex_lock(&ptrlock);
    if ( !(clone->dtable = dtable->Fork()) )
        failure = true;
    //if ( !(clone->mtable = mtable->Fork()) )
    //    failure = true;
    clone->mtable = mtable;
    kthread_mutex_unlock(&ptrlock);

    if ( !(clone->program_image_path = String::Clone(program_image_path)) )
        failure = true;

    // If the process creation failed, ask the process to commit suicide and
    // not become a zombie, as we don't wait for it to exit. It will clean
    // up all the above resources and delete itself.
    if ( failure )
    {
        clone->AbortConstruction();
        return NULL;
    }

    return clone;
}
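
Fork(), Execute(), and Wait() are the kernel halves of the usual user-space spawn pattern. As a hedged sketch (standard POSIX calls, not part of this file; /bin/ls is only an example path), the whole round trip looks like this from a program's point of view:

#include <sys/wait.h>
#include <unistd.h>

// Spawn /bin/ls and wait for it; returns the child's wait status or -1.
static int spawn_and_wait(void)
{
    pid_t child = fork();
    if ( child < 0 )
        return -1;
    if ( child == 0 )
    {
        const char* argv[] = { "ls", NULL };
        execv("/bin/ls", (char* const*) argv);
        _exit(127); // only reached if execv failed
    }
    int status;
    if ( waitpid(child, &status, 0) < 0 )
        return -1;
    return status;
}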

void Process::ResetForExecute()
{
    DeleteTimers();

    for ( int i = 0; i < SIG_MAX_NUM; i++ )
    {
        signal_actions[i].sa_flags = 0;
        if ( signal_actions[i].sa_handler == SIG_DFL )
            continue;
        if ( signal_actions[i].sa_handler == SIG_IGN )
            continue;
        signal_actions[i].sa_handler = SIG_DFL;
    }

    sigreturn = NULL;
    stack_t* signal_stack = &CurrentThread()->signal_stack;
    memset(signal_stack, 0, sizeof(*signal_stack));
    signal_stack->ss_flags = SS_DISABLE;

    ResetAddressSpace();
}
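
The loop above applies the usual exec-time rule for signal dispositions: ignored signals stay ignored, the default action stays default, and any custom handler reverts to the default, since handler addresses are meaningless in the new program image. The same rule in isolation, as a small illustrative sketch (the function name is invented for the example):

// Illustrative sketch: exec-time reset of a single signal disposition.
static void reset_disposition_for_exec(struct sigaction* action)
{
    action->sa_flags = 0;
    if ( action->sa_handler != SIG_DFL && action->sa_handler != SIG_IGN )
        action->sa_handler = SIG_DFL; // custom handlers do not survive exec
}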

bool Process::MapSegment(struct segment* result, void* hint, size_t size,
                         int flags, int prot)
{
    // process->segment_write_lock is held at this point.
    // process->segment_lock is held at this point.

    if ( !size )
        size = 1;

    if ( !PlaceSegment(result, this, hint, size, flags) )
        return false;
    if ( !Memory::MapMemory(this, result->addr, result->size, result->prot = prot) )
    {
        // The caller is expected to self-destruct in this case, so the
        // segment just created is not removed.
        return false;
    }
    Memory::Flush();
    return true;
}

int Process::Execute(const char* programname, const uint8_t* program,
                     size_t programsize, int argc, const char* const* argv,
                     int envc, const char* const* envp,
                     struct thread_registers* regs)
{
    assert(argc != INT_MAX);
    assert(envc != INT_MAX);
    assert(CurrentProcess() == this);

    char* programname_clone = String::Clone(programname);
    if ( !programname_clone )
        return -1;

    ELF::Auxiliary aux;

    addr_t entry = ELF::Load(program, programsize, &aux);
    if ( !entry ) { delete[] programname_clone; return -1; }

    delete[] program_image_path;
    program_image_path = programname_clone; programname_clone = NULL;

    uintptr_t userspace_addr;
    size_t userspace_size;
    Memory::GetUserVirtualArea(&userspace_addr, &userspace_size);

    const size_t stack_size = 512UL * 1024UL;
    void* stack_hint = (void*) (userspace_addr + userspace_size - stack_size);
    const int stack_prot = PROT_READ | PROT_WRITE | PROT_KREAD | PROT_KWRITE | PROT_FORK;

    if ( !aux.tls_mem_align )
        aux.tls_mem_align = 1;
    if ( Page::Size() < aux.tls_mem_align )
        return errno = EINVAL, -1;
    if ( !aux.uthread_align )
        aux.uthread_align = 1;
    if ( Page::Size() < aux.uthread_align )
        return errno = EINVAL, -1;
    if ( aux.uthread_size < sizeof(struct uthread) )
        aux.uthread_size = sizeof(struct uthread);

    size_t raw_tls_size = aux.tls_mem_size;
    size_t raw_tls_size_aligned = -(-raw_tls_size & ~(aux.tls_mem_align-1));
    if ( raw_tls_size && raw_tls_size_aligned == 0 /* overflow */ )
        return errno = EINVAL, -1;
    int raw_tls_kprot = PROT_KWRITE | PROT_FORK;
    int raw_tls_prot = PROT_READ | PROT_KREAD | PROT_FORK;
    void* raw_tls_hint = stack_hint;

    size_t tls_size = raw_tls_size_aligned + aux.uthread_size;
    size_t tls_offset_tls = 0;
    size_t tls_offset_uthread = raw_tls_size_aligned;
    if ( aux.tls_mem_align < aux.uthread_align )
    {
        size_t more_aligned = -(-raw_tls_size_aligned & ~(aux.uthread_align-1));
        if ( raw_tls_size_aligned && more_aligned == 0 /* overflow */ )
            return errno = EINVAL, -1;
        size_t difference = more_aligned - raw_tls_size_aligned;
        tls_size += difference;
        tls_offset_tls += difference;
        tls_offset_uthread += difference;
    }
    assert((tls_offset_tls & (aux.tls_mem_align-1)) == 0);
    assert((tls_offset_uthread & (aux.uthread_align-1)) == 0);
    int tls_prot = PROT_READ | PROT_WRITE | PROT_KREAD | PROT_KWRITE | PROT_FORK;
    void* tls_hint = stack_hint;

    size_t auxcode_size = Page::Size();
    int auxcode_kprot = PROT_KWRITE | PROT_FORK;
    int auxcode_prot = PROT_EXEC | PROT_READ | PROT_KREAD | PROT_FORK;
    void* auxcode_hint = stack_hint;

    size_t arg_size = 0;

    size_t argv_size = sizeof(char*) * (argc + 1);
    size_t envp_size = sizeof(char*) * (envc + 1);

    arg_size += argv_size;
    arg_size += envp_size;

    for ( int i = 0; i < argc; i++ )
        arg_size += strlen(argv[i]) + 1;
    for ( int i = 0; i < envc; i++ )
        arg_size += strlen(envp[i]) + 1;

    struct segment arg_segment;
    struct segment stack_segment;
    struct segment raw_tls_segment;
    struct segment tls_segment;
    struct segment auxcode_segment;

    kthread_mutex_lock(&segment_write_lock);
    kthread_mutex_lock(&segment_lock);

    if ( !(MapSegment(&arg_segment, stack_hint, arg_size, 0, stack_prot) &&
           MapSegment(&stack_segment, stack_hint, stack_size, 0, stack_prot) &&
           MapSegment(&raw_tls_segment, raw_tls_hint, raw_tls_size, 0, raw_tls_kprot) &&
           MapSegment(&tls_segment, tls_hint, tls_size, 0, tls_prot) &&
           MapSegment(&auxcode_segment, auxcode_hint, auxcode_size, 0, auxcode_kprot)) )
    {
        kthread_mutex_unlock(&segment_lock);
        kthread_mutex_unlock(&segment_write_lock);
        ResetForExecute();
        return errno = ENOMEM, -1;
    }

    char** target_argv = (char**) ((char*) arg_segment.addr + 0);
    char** target_envp = (char**) ((char*) arg_segment.addr + argv_size);
    char* target_strings = (char*) ((char*) arg_segment.addr + argv_size + envp_size);
    size_t target_strings_offset = 0;

    for ( int i = 0; i < argc; i++ )
    {
        const char* arg = argv[i];
        size_t arg_len = strlen(arg);
        char* target_arg = target_strings + target_strings_offset;
        strcpy(target_arg, arg);
        target_argv[i] = target_arg;
        target_strings_offset += arg_len + 1;
    }
    target_argv[argc] = (char*) NULL;

    for ( int i = 0; i < envc; i++ )
    {
        const char* env = envp[i];
        size_t env_len = strlen(env);
        char* target_env = target_strings + target_strings_offset;
        strcpy(target_env, env);
        target_envp[i] = target_env;
        target_strings_offset += env_len + 1;
    }
    target_envp[envc] = (char*) NULL;

    const uint8_t* file_raw_tls = program + aux.tls_file_offset;

    uint8_t* target_raw_tls = (uint8_t*) raw_tls_segment.addr;
    memcpy(target_raw_tls, file_raw_tls, aux.tls_file_size);
    memset(target_raw_tls + aux.tls_file_size, 0, aux.tls_mem_size - aux.tls_file_size);
    Memory::ProtectMemory(this, raw_tls_segment.addr, raw_tls_segment.size, raw_tls_prot);

    uint8_t* target_tls = (uint8_t*) (tls_segment.addr + tls_offset_tls);
    assert((((uintptr_t) target_tls) & (aux.tls_mem_align-1)) == 0);
    memcpy(target_tls, file_raw_tls, aux.tls_file_size);
    memset(target_tls + aux.tls_file_size, 0, aux.tls_mem_size - aux.tls_file_size);

    struct uthread* uthread = (struct uthread*) (tls_segment.addr + tls_offset_uthread);
    assert((((uintptr_t) uthread) & (aux.uthread_align-1)) == 0);
    memset(uthread, 0, sizeof(*uthread));
    uthread->uthread_pointer = uthread;
    uthread->uthread_size = aux.uthread_size;
    uthread->uthread_flags = UTHREAD_FLAG_INITIAL;
    uthread->tls_master_mmap = (void*) raw_tls_segment.addr;
    uthread->tls_master_size = aux.tls_mem_size;
    uthread->tls_master_align = aux.tls_mem_align;
    uthread->tls_mmap = (void*) tls_segment.addr;
    uthread->tls_size = tls_size;
    uthread->stack_mmap = (void*) stack_segment.addr;
    uthread->stack_size = stack_segment.size;
    uthread->arg_mmap = (void*) arg_segment.addr;
    uthread->arg_size = arg_segment.size;
    memset(uthread + 1, 0, aux.uthread_size - sizeof(struct uthread));

    memset(regs, 0, sizeof(*regs));
#if defined(__i386__)
    regs->eax = argc;
    regs->ebx = (size_t) target_argv;
    regs->edx = envc;
    regs->ecx = (size_t) target_envp;
    regs->eip = entry;
    regs->esp = (stack_segment.addr + stack_segment.size) & ~15UL;
    regs->ebp = regs->esp;
    regs->cs = UCS | URPL;
    regs->ds = UDS | URPL;
    regs->ss = UDS | URPL;
    regs->eflags = FLAGS_RESERVED1 | FLAGS_INTERRUPT | FLAGS_ID;
    regs->signal_pending = 0;
    regs->gsbase = (uint32_t) uthread;
    regs->cr3 = addrspace;
    regs->kernel_stack = GDT::GetKernelStack();
    memcpy(regs->fpuenv, Float::fpu_initialized_regs, 512);
#elif defined(__x86_64__)
    regs->rdi = argc;
    regs->rsi = (size_t) target_argv;
    regs->rdx = envc;
    regs->rcx = (size_t) target_envp;
    regs->rip = entry;
    regs->rsp = (stack_segment.addr + stack_segment.size) & ~15UL;
    regs->rbp = regs->rsp;
    regs->cs = UCS | URPL;
    regs->ds = UDS | URPL;
    regs->ss = UDS | URPL;
    regs->rflags = FLAGS_RESERVED1 | FLAGS_INTERRUPT | FLAGS_ID;
    regs->signal_pending = 0;
    regs->fsbase = (uint64_t) uthread;
    regs->cr3 = addrspace;
    regs->kernel_stack = GDT::GetKernelStack();
    memcpy(regs->fpuenv, Float::fpu_initialized_regs, 512);
#else
#warning "You need to implement initializing the first thread after execute"
#endif

    uint8_t* auxcode = (uint8_t*) auxcode_segment.addr;
#if defined(__i386__)
    sigreturn = (void (*)(void)) &auxcode[0];
    auxcode[0] = 0xCD; /* int .... */
    auxcode[1] = 0x83; /* ... $131 */
#elif defined(__x86_64__)
    sigreturn = (void (*)(void)) &auxcode[0];
    auxcode[0] = 0xCD; /* int .... */
    auxcode[1] = 0x83; /* ... $131 */
#else
    (void) auxcode;
#warning "You need to initialize auxcode with a sigreturn routine"
#endif
    Memory::ProtectMemory(this, auxcode_segment.addr, auxcode_segment.size, auxcode_prot);

    kthread_mutex_unlock(&segment_lock);
    kthread_mutex_unlock(&segment_write_lock);

    dtable->OnExecute();

    return 0;
}
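
Execute() rounds the TLS sizes with the expression -(-size & ~(align-1)), which rounds size up to the next multiple of a power-of-two alignment and yields 0 on overflow, which the code above treats as an error. A self-contained sketch of that idiom (the helper name is illustrative, not from this file):

#include <assert.h>
#include <stddef.h>

// Round size up to the next multiple of align (a power of two).
// Returns 0 if the rounded value would overflow, as checked in Execute().
static inline size_t align_up(size_t size, size_t align)
{
    return -(-size & ~(align - 1));
}

int main(void)
{
    assert(align_up(5, 4) == 8);
    assert(align_up(8, 4) == 8);
    assert(align_up(0, 16) == 0);
    return 0;
}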

static
const char* shebang_lookup_environment(const char* name, char* const* envp)
{
    size_t equalpos = strcspn(name, "=");
    if ( name[equalpos] == '=' )
        return NULL;
    size_t namelen = equalpos;
    for ( size_t i = 0; envp[i]; i++ )
    {
        if ( strncmp(name, envp[i], namelen) )
            continue;
        if ( envp[i][namelen] != '=' )
            continue;
        return envp[i] + namelen + 1;
    }
    return NULL;
}

static char* shebang_tokenize(char** saved)
{
    char* data = *saved;
    if ( !data )
        return *saved = NULL;
    while ( data[0] && isspace((unsigned char) data[0]) )
        data++;
    if ( !data[0] )
        return *saved = NULL;
    size_t input = 0;
    size_t output = 0;
    bool singly = false;
    bool doubly = false;
    bool escaped = false;
    for ( ; data[input]; input++ )
    {
        char c = data[input];
        if ( !escaped && !singly && !doubly && isspace((unsigned char) c) )
            break;
        if ( !escaped && !doubly && c == '\'' )
        {
            singly = !singly;
            continue;
        }
        if ( !escaped && !singly && c == '"' )
        {
            doubly = !doubly;
            continue;
        }
        if ( !singly && !escaped && c == '\\' )
        {
            escaped = true;
            continue;
        }
        if ( escaped )
        {
            switch ( c )
            {
            case 'a': c = '\a'; break;
            case 'b': c = '\b'; break;
            case 'e': c = '\e'; break;
            case 'f': c = '\f'; break;
            case 'n': c = '\n'; break;
            case 'r': c = '\r'; break;
            case 't': c = '\t'; break;
            case 'v': c = '\v'; break;
            default: break;
            };
        }
        escaped = false;
        data[output++] = c;
    }
    if ( data[input] )
        *saved = data + input + 1;
    else
        *saved = NULL;
    data[output] = '\0';
    return data;
}

static size_t shebang_count_arguments(char* line)
{
    size_t result = 0;
    while ( shebang_tokenize(&line) )
        result++;
    return result;
}
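
shebang_tokenize() consumes the interpreter line in place, honoring single quotes, double quotes, and backslash escapes, and returns one token per call until the line is exhausted. A hedged usage sketch (the demo function is invented for illustration and is not part of this file):

// Illustrative only: how the tokenizer walks a shebang-style line in place.
static void demo_shebang_tokenize(void)
{
    char line[] = "/bin/sh -e 'hello world'";
    char* saved = line;
    char* token;
    while ( (token = shebang_tokenize(&saved)) )
    {
        // Successive calls yield "/bin/sh", then "-e", then hello world with
        // the quotes removed; each token points into the rewritten buffer.
        (void) token;
    }
}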

// NOTE: The PATH-searching logic is repeated multiple places. Until this logic
// can be shared somehow, you need to keep this comment in sync as well
// as the logic in these files:
// * kernel/process.cpp
// * libc/unistd/execvpe.c
// * utils/which.c
// NOTE: See comments in execvpe() for algorithmic commentary.

static bool sys_execve_alloc(addralloc_t* alloc, size_t size)
{
    if ( !AllocateKernelAddress(alloc, size) )
        return false;
    if ( !Memory::MapRange(alloc->from, alloc->size, PROT_KREAD | PROT_KWRITE, PAGE_USAGE_EXECUTE) )
        return FreeKernelAddress(alloc), false;
    Memory::Flush();
    return true;
}

static void sys_execve_free(addralloc_t* alloc)
{
    Memory::UnmapRange(alloc->from, alloc->size, PAGE_USAGE_EXECUTE);
    Memory::Flush();
    FreeKernelAddress(alloc);
}
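
sys_execve_kernel() below pairs every early return with a sys_execve_free() of the file buffer, using the comma-operator cleanup style seen throughout this file. A hedged alternative sketch (illustrative only, not how this file is written) is to let a small scope-bound guard own the allocation so the cleanup cannot be forgotten on any path:

// Illustrative sketch: scope-bound ownership of an execve file buffer.
class ExecveBufferSketch
{
public:
    ExecveBufferSketch() : acquired(false) { }
    ~ExecveBufferSketch() { if ( acquired ) sys_execve_free(&alloc); }
    bool Acquire(size_t size) { return acquired = sys_execve_alloc(&alloc, size); }
    uint8_t* Data() { return (uint8_t*) alloc.from; }
private:
    addralloc_t alloc;
    bool acquired;
};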

static
int sys_execve_kernel(const char* filename,
                      int argc,
                      char* const* argv,
                      int envc,
                      char* const* envp,
                      struct thread_registers* regs)
{
    Process* process = CurrentProcess();

    ioctx_t ctx;
    SetupKernelIOCtx(&ctx);
    Ref<Descriptor> from = filename[0] == '/' ? process->GetRoot() : process->GetCWD();
    Ref<Descriptor> desc = from->open(&ctx, filename, O_EXEC | O_READ, 0);
    if ( !desc )
        return -1;
    from.Reset();

    struct stat st;
    if ( desc->stat(&ctx, &st) )
        return -1;
    if ( !(st.st_mode & 0111) )
        return errno = EACCES, -1;
    if ( st.st_size < 0 )
        return errno = EINVAL, -1;
    if ( (uintmax_t) SIZE_MAX < (uintmax_t) st.st_size )
        return errno = EFBIG, -1;

    size_t filesize = (size_t) st.st_size;

    addralloc_t buffer_alloc;
    if ( !sys_execve_alloc(&buffer_alloc, filesize) )
        return -1;

    uint8_t* buffer = (uint8_t*) buffer_alloc.from;
    for ( size_t sofar = 0; sofar < filesize; )
    {
        ssize_t amount = desc->read(&ctx, buffer + sofar, filesize - sofar);
        if ( amount < 0 )
            return sys_execve_free(&buffer_alloc), -1;
        if ( amount == 0 )
            return sys_execve_free(&buffer_alloc), errno = EEOF, -1;
        sofar += amount;
    }

    desc.Reset();

    int result = process->Execute(filename, buffer, filesize, argc, argv, envc, envp, regs);

    if ( result == 0 || errno != ENOEXEC ||
         filesize < 2 || buffer[0] != '#' || buffer[1] != '!' )
        return sys_execve_free(&buffer_alloc), result;

    size_t line_length = 0;
    while ( 2 + line_length < filesize && buffer[2 + line_length] != '\n' )
        line_length++;
    if ( line_length == filesize )
        return sys_execve_free(&buffer_alloc), errno = ENOEXEC, -1;

    char* line = new char[line_length+1];
    if ( !line )
        return sys_execve_free(&buffer_alloc), -1;
    memcpy(line, buffer + 2, line_length);
    line[line_length] = '\0';
    sys_execve_free(&buffer_alloc);

    char* line_clone = String::Clone(line);
    if ( !line_clone )
        return delete[] line, -1;
    size_t argument_count = shebang_count_arguments(line_clone);
    delete[] line_clone;

    if ( !argument_count || INT_MAX < argument_count )
        return delete[] line, errno = ENOEXEC, -1;

    int sb_argc = (int) argument_count;
    char** sb_argv = new char*[sb_argc];
    if ( !sb_argv )
        return delete[] line, -1;

    char* sb_saved = line;
    for ( int i = 0; i < sb_argc; i++ )
        sb_argv[i] = shebang_tokenize(&sb_saved);

    if ( INT_MAX - argc <= sb_argc )
        return delete[] sb_argv, delete[] line, errno = EOVERFLOW, -1;

    if ( !sb_argv[0] || !sb_argv[0][0] )
        return delete[] sb_argv, delete[] line, errno = ENOENT, -1;

    int new_argc = sb_argc + argc;
    char** new_argv = new char*[new_argc + 1];
    if ( !new_argv )
        return delete[] sb_argv, delete[] line, -1;

    for ( int i = 0; i < sb_argc; i++ )
        new_argv[i] = sb_argv[i];
    new_argv[sb_argc + 0] = (char*) filename;
    for ( int i = 1; i < argc; i++ )
        new_argv[sb_argc + i] = argv[i];
    new_argv[new_argc] = (char*) NULL;

    result = -1;

    // (See the above comment block before editing this searching logic)
    const char* path = shebang_lookup_environment("PATH", envp);
    bool search_path = !strchr(sb_argv[0], '/') && path;
    bool any_tries = false;
    bool any_eacces = false;

    const char* new_argv0 = sb_argv[0];
    while ( search_path && *path )
    {
        size_t len = strcspn(path, ":");
        if ( !len )
        {
            path++;
            continue;
        }

        any_tries = true;

        char* dirpath = strndup(path, len);
        if ( !dirpath )
            return -1;
        if ( (path += len)[0] == ':' )
            path++;
        while ( len && dirpath[len - 1] == '/' )
            dirpath[--len] = '\0';

        char* fullpath;
        if ( asprintf(&fullpath, "%s/%s", dirpath, sb_argv[0]) < 0 )
            return free(dirpath), -1;

        result = sys_execve_kernel(fullpath, new_argc, new_argv, envc, envp, regs);

        free(fullpath);
        free(dirpath);

        if ( result == 0 )
            break;

        if ( errno == ENOENT )
            continue;

        if ( errno == ELOOP ||
             errno == EISDIR ||
             errno == ENAMETOOLONG ||
             errno == ENOTDIR )
            continue;

        if ( errno == EACCES )
        {
            any_eacces = true;
            continue;
        }

        break;
    }

    if ( !any_tries )
        result = sys_execve_kernel(new_argv0, new_argc, new_argv, envc, envp, regs);

    if ( result < 0 && any_eacces )
        errno = EACCES;

    delete[] new_argv;
    delete[] sb_argv;
    delete[] line;

    return result;
}
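
To make the rewriting above concrete, a hypothetical illustration (the paths and arguments are invented for the example): executing /tmp/hello.sh, whose first line is "#!/bin/sh -e", with argv { "hello.sh", "world" } re-enters sys_execve_kernel() as follows:

// Hypothetical walk-through of the argv rewriting performed above:
//   script        : /tmp/hello.sh, first line "#!/bin/sh -e"
//   original argv : { "hello.sh", "world", NULL }            (argc == 2)
//   sb_argv       : { "/bin/sh", "-e" }                      (sb_argc == 2)
//   new_argv      : { "/bin/sh", "-e", "/tmp/hello.sh", "world", NULL }
// Since "/bin/sh" contains a '/', search_path is false, so the interpreter
// is executed directly via new_argv0 rather than searched for in PATH.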
|
Multithreaded kernel and improvement of signal handling.
Pardon the big ass-commit, this took months to develop and debug and the
refactoring got so far that a clean merge became impossible. The good news
is that this commit does quite a bit of cleaning up and generally improves
the kernel quality.
This makes the kernel fully pre-emptive and multithreaded. This was done
by rewriting the interrupt code, the scheduler, introducing new threading
primitives, and rewriting large parts of the kernel. During the past few
commits the kernel has had its device drivers thread secured; this commit
thread secures large parts of the core kernel. There still remains some
parts of the kernel that is _not_ thread secured, but this is not a problem
at this point. Each user-space thread has an associated kernel stack that
it uses when it goes into kernel mode. This stack is by default 8 KiB since
that value works for me and is also used by Linux. Strange things tends to
happen on x86 in case of a stack overflow - there is no ideal way to catch
such a situation right now.
The system call conventions were changed, too. The %edx register is now
used to provide the errno value of the call, instead of the kernel writing
it into a registered global variable. The system call code has also been
updated to better reflect the native calling conventions: not all registers
have to be preserved. This makes system calls faster and simplifies the
assembly. In the kernel, there is no longer the event.h header or the hacky
method of 'resuming system calls' that closely resembles cooperative
multitasking. If a system call wants to block, it should just block.
The signal handling was also improved significantly. At this point, signals
cannot interrupt kernel threads (but can always interrupt user-space threads
if enabled), which introduces some problems with how a SIGINT could
interrupt a blocking read, for instance. This commit introduces and uses a
number of new primitives such as kthread_lock_mutex_signal() that attempts
to get the lock but fails if a signal is pending. In this manner, the kernel
is safer as kernel threads cannot be shut down inconveniently, but in return
for complexity as blocking operations must check they if they should fail.
Process exiting has also been refactored significantly. The _exit(2) system
call sets the exit code and sends SIGKILL to all the threads in the process.
Once all the threads have cleaned themselves up and exited, a worker thread
calls the process's LastPrayer() method that unmaps memory, deletes the
address space, notifies the parent, etc. This provides a very robust way to
terminate processes as even half-constructed processes (during a failing fork
for instance) can be gracefully terminated.
I have introduced a number of kernel threads to help avoid threading problems
and simplify kernel design. For instance, there is now a functional generic
kernel worker thread that any kernel thread can schedule jobs for. Interrupt
handlers run with interrupts off (hence they cannot call kthread_ functions
as it may deadlock the system if another thread holds the lock) therefore
they cannot use the standard kernel worker threads. Instead, they use a
special purpose interrupt worker thread that works much like the generic one
expect that interrupt handlers can safely queue work with interrupts off.
Note that this also means that interrupt handlers cannot allocate memory or
print to the kernel log/screen as such mechanisms uses locks. I'll introduce
a lock free algorithm for such cases later on.
The boot process has also changed. The original kernel init thread in
kernel.cpp creates a new bootstrap thread and becomes the system idle thread.
Note that pid=0 now means the kernel, as there is no longer a system idle
process. The bootstrap thread launches all the kernel worker threads and then
creates a new process and loads /bin/init into it and then creates a thread
in pid=1, which starts the system. The bootstrap thread then quietly waits
for pid=1 to exit after which it shuts down/reboots/panics the system.
In general, the possibility of race conditions and deadlocks has forced me
to revise a lot of the design and make sure it was thread secure. Since early
parts of the kernel were quite hacky, I had to refactor such code. So it seems
that the risk of deadlocks forces me to write better code.
Note that a real preemptive multithreaded kernel simplifies the construction
of blocking system calls. My hope is that this will trigger a clean-up of
the filesystem code, which currently is almost beyond repair.
Almost all of the kernel was modified during this refactoring. To the extent
possible, these changes have been backported to the older non-multithreaded
kernel, but many changes were tightly coupled and went into this commit.
Of interest is the implementation of the kthread_ API based on the design
of pthreads; this library provides easy synchronization mechanisms and
includes C++-style scoped locks. This commit also introduces new worker
threads and tested mechanisms for interrupt handlers to schedule work in a
kernel worker thread.
A lot of code has been rewritten from scratch and has become a lot more
stable and correct.
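As a minimal sketch of the kthread_ primitives and scoped locks mentioned
above (assuming kthread.h provides kthread_mutex_t with a
KTHREAD_MUTEX_INITIALIZER and the ScopedLock RAII helper used elsewhere in
this file; the example names are made up):

	kthread_mutex_t example_lock = KTHREAD_MUTEX_INITIALIZER;
	size_t example_counter = 0;

	void ExampleIncrement()
	{
		// ScopedLock takes the mutex here and releases it when the
		// object goes out of scope, even on early return.
		ScopedLock lock(&example_lock);
		example_counter++;
	}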
Share and enjoy!
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-05-10 11:21:27 -04:00
|
|
|
int sys_execve(const char* user_filename,
|
2015-01-22 18:29:04 -05:00
|
|
|
char* const* user_argv,
|
|
|
|
char* const* user_envp)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
|
|
|
char* filename;
|
|
|
|
int argc;
|
|
|
|
int envc;
|
|
|
|
char** argv;
|
|
|
|
char** envp;
|
|
|
|
int result = -1;
|
2014-03-03 18:11:13 -05:00
|
|
|
struct thread_registers regs;
|
2013-06-11 19:01:49 -04:00
|
|
|
memset(®s, 0, sizeof(regs));
|
|
|
|
|
2014-05-10 11:21:27 -04:00
|
|
|
if ( !user_filename || !user_argv || !user_envp )
|
|
|
|
return errno = EFAULT, -1;
|
2013-06-11 19:01:49 -04:00
|
|
|
|
2014-05-10 11:21:27 -04:00
|
|
|
if ( !(filename = GetStringFromUser(user_filename)) )
|
|
|
|
goto cleanup_done;
|
|
|
|
|
|
|
|
argc = 0;
|
|
|
|
while ( true )
|
|
|
|
{
|
|
|
|
const char* user_arg;
|
|
|
|
if ( !CopyFromUser(&user_arg, user_argv + argc, sizeof(user_arg)) )
|
|
|
|
goto cleanup_filename;
|
|
|
|
if ( !user_arg )
|
|
|
|
break;
|
|
|
|
if ( ++argc == INT_MAX )
|
|
|
|
{
|
|
|
|
errno = E2BIG;
|
|
|
|
goto cleanup_filename;
|
|
|
|
}
|
|
|
|
}
|
2013-06-11 19:01:49 -04:00
|
|
|
|
|
|
|
argv = new char*[argc+1];
|
2014-05-10 11:21:27 -04:00
|
|
|
if ( !argv )
|
|
|
|
goto cleanup_filename;
|
2013-06-11 19:01:49 -04:00
|
|
|
memset(argv, 0, sizeof(char*) * (argc+1));
|
|
|
|
|
|
|
|
for ( int i = 0; i < argc; i++ )
|
|
|
|
{
|
2014-05-10 11:21:27 -04:00
|
|
|
const char* user_arg;
|
|
|
|
if ( !CopyFromUser(&user_arg, user_argv + i, sizeof(user_arg)) )
|
|
|
|
goto cleanup_argv;
|
|
|
|
if ( !(argv[i] = GetStringFromUser(user_arg)) )
|
|
|
|
goto cleanup_argv;
|
2011-09-21 14:52:29 -04:00
|
|
|
}
|
|
|
|
|
2014-05-10 11:21:27 -04:00
|
|
|
envc = 0;
|
|
|
|
while ( true )
|
2011-09-21 14:52:29 -04:00
|
|
|
{
|
2014-05-10 11:21:27 -04:00
|
|
|
const char* user_env;
|
|
|
|
if ( !CopyFromUser(&user_env, user_envp + envc, sizeof(user_env)) )
|
|
|
|
goto cleanup_argv;
|
|
|
|
if ( !user_env )
|
|
|
|
break;
|
|
|
|
if ( ++envc == INT_MAX )
|
|
|
|
{
|
|
|
|
errno = E2BIG;
|
|
|
|
goto cleanup_argv;
|
|
|
|
}
|
2013-06-11 19:01:49 -04:00
|
|
|
}
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-05-10 11:21:27 -04:00
|
|
|
envp = new char*[envc+1];
|
|
|
|
if ( !envp )
|
|
|
|
goto cleanup_argv;
|
|
|
|
memset(envp, 0, sizeof(char*) * (envc+1));
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2014-05-10 11:21:27 -04:00
|
|
|
for ( int i = 0; i < envc; i++ )
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
2014-05-10 11:21:27 -04:00
|
|
|
const char* user_env;
|
|
|
|
if ( !CopyFromUser(&user_env, user_envp + i, sizeof(user_env)) )
|
|
|
|
goto cleanup_envp;
|
|
|
|
if ( !(envp[i] = GetStringFromUser(user_env)) )
|
|
|
|
goto cleanup_envp;
|
2013-06-11 19:01:49 -04:00
|
|
|
}
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-05-10 11:21:27 -04:00
|
|
|
result = sys_execve_kernel(filename, argc, argv, envc, envp, ®s);
|
2013-06-11 19:01:49 -04:00
|
|
|
|
|
|
|
cleanup_envp:
|
|
|
|
for ( int i = 0; i < envc; i++)
|
|
|
|
delete[] envp[i];
|
|
|
|
delete[] envp;
|
|
|
|
cleanup_argv:
|
|
|
|
for ( int i = 0; i < argc; i++)
|
|
|
|
delete[] argv[i];
|
|
|
|
delete[] argv;
|
|
|
|
cleanup_filename:
|
|
|
|
delete[] filename;
|
|
|
|
cleanup_done:
|
2014-05-10 11:21:27 -04:00
|
|
|
if ( result == 0 )
|
2014-03-03 18:11:13 -05:00
|
|
|
LoadRegisters(®s);
|
2013-06-11 19:01:49 -04:00
|
|
|
return result;
|
|
|
|
}
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-10-16 18:04:47 -04:00
|
|
|
pid_t sys_tfork(int flags, struct tfork* user_regs)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
2013-08-04 14:24:59 -04:00
|
|
|
struct tfork regs;
|
2014-01-19 12:22:53 -05:00
|
|
|
if ( !CopyFromUser(®s, user_regs, sizeof(regs)) )
|
|
|
|
return -1;
|
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
if ( Signal::IsPending() )
|
|
|
|
return errno = EINTR, -1;
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2013-08-31 12:02:53 -04:00
|
|
|
bool making_process = flags == SFFORK;
|
|
|
|
bool making_thread = (flags & (SFPROC | SFPID | SFFD | SFMEM | SFCWD | SFROOT)) == SFPROC;
|
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
// TODO: Properly support tfork(2).
|
2013-08-31 12:02:53 -04:00
|
|
|
if ( !(making_thread || making_process) )
|
2013-06-11 19:01:49 -04:00
|
|
|
return errno = ENOSYS, -1;
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2013-08-04 14:24:59 -04:00
|
|
|
if ( regs.altstack.ss_flags & ~__SS_SUPPORTED_FLAGS )
|
|
|
|
return errno = EINVAL, -1;
|
|
|
|
|
2014-03-03 18:11:13 -05:00
|
|
|
size_t stack_alignment = 16;
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
// TODO: Is it a hack to create a new kernel stack here?
|
|
|
|
Thread* curthread = CurrentThread();
|
2014-03-03 18:11:13 -05:00
|
|
|
size_t newkernelstacksize = curthread->kernelstacksize;
|
|
|
|
uint8_t* newkernelstack = new uint8_t[newkernelstacksize + stack_alignment];
|
2013-06-11 19:01:49 -04:00
|
|
|
if ( !newkernelstack )
|
|
|
|
return -1;
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2014-03-03 18:11:13 -05:00
|
|
|
uintptr_t stack_aligned = (uintptr_t) newkernelstack;
|
|
|
|
size_t stack_aligned_size = newkernelstacksize;
|
|
|
|
|
|
|
|
if ( ((uintptr_t) stack_aligned) & (stack_alignment-1) )
|
|
|
|
stack_aligned = (stack_aligned + 16) & ~(stack_alignment-1);
|
|
|
|
stack_aligned_size &= 0xFFFFFFF0;
|
|
|
|
|
2013-08-31 12:02:53 -04:00
|
|
|
Process* child_process;
|
|
|
|
if ( making_thread )
|
|
|
|
child_process = CurrentProcess();
|
|
|
|
else if ( !(child_process = CurrentProcess()->Fork()) )
|
|
|
|
{
|
|
|
|
delete[] newkernelstack;
|
|
|
|
return -1;
|
|
|
|
}
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2014-03-03 18:11:13 -05:00
|
|
|
struct thread_registers cpuregs;
|
|
|
|
memset(&cpuregs, 0, sizeof(cpuregs));
|
|
|
|
#if defined(__i386__)
|
|
|
|
cpuregs.eip = regs.eip;
|
|
|
|
cpuregs.esp = regs.esp;
|
|
|
|
cpuregs.eax = regs.eax;
|
|
|
|
cpuregs.ebx = regs.ebx;
|
|
|
|
cpuregs.ecx = regs.ecx;
|
|
|
|
cpuregs.edx = regs.edx;
|
|
|
|
cpuregs.edi = regs.edi;
|
|
|
|
cpuregs.esi = regs.esi;
|
|
|
|
cpuregs.ebp = regs.ebp;
|
|
|
|
cpuregs.cs = UCS | URPL;
|
|
|
|
cpuregs.ds = UDS | URPL;
|
|
|
|
cpuregs.ss = UDS | URPL;
|
|
|
|
cpuregs.eflags = FLAGS_RESERVED1 | FLAGS_INTERRUPT | FLAGS_ID;
|
|
|
|
cpuregs.fsbase = regs.fsbase;
|
|
|
|
cpuregs.gsbase = regs.gsbase;
|
|
|
|
cpuregs.cr3 = child_process->addrspace;
|
|
|
|
cpuregs.kernel_stack = stack_aligned + stack_aligned_size;
|
2016-03-26 11:04:05 -04:00
|
|
|
memcpy(&cpuregs.fpuenv, Float::fpu_initialized_regs, 512);
|
2014-03-03 18:11:13 -05:00
|
|
|
#elif defined(__x86_64__)
|
|
|
|
cpuregs.rip = regs.rip;
|
|
|
|
cpuregs.rsp = regs.rsp;
|
|
|
|
cpuregs.rax = regs.rax;
|
|
|
|
cpuregs.rbx = regs.rbx;
|
|
|
|
cpuregs.rcx = regs.rcx;
|
|
|
|
cpuregs.rdx = regs.rdx;
|
|
|
|
cpuregs.rdi = regs.rdi;
|
|
|
|
cpuregs.rsi = regs.rsi;
|
|
|
|
cpuregs.rbp = regs.rbp;
|
|
|
|
cpuregs.r8 = regs.r8;
|
|
|
|
cpuregs.r9 = regs.r9;
|
|
|
|
cpuregs.r10 = regs.r10;
|
|
|
|
cpuregs.r11 = regs.r11;
|
|
|
|
cpuregs.r12 = regs.r12;
|
|
|
|
cpuregs.r13 = regs.r13;
|
|
|
|
cpuregs.r14 = regs.r14;
|
|
|
|
cpuregs.r15 = regs.r15;
|
|
|
|
cpuregs.cs = UCS | URPL;
|
|
|
|
cpuregs.ds = UDS | URPL;
|
|
|
|
cpuregs.ss = UDS | URPL;
|
|
|
|
cpuregs.rflags = FLAGS_RESERVED1 | FLAGS_INTERRUPT | FLAGS_ID;
|
|
|
|
cpuregs.fsbase = regs.fsbase;
|
|
|
|
cpuregs.gsbase = regs.gsbase;
|
|
|
|
cpuregs.cr3 = child_process->addrspace;
|
|
|
|
cpuregs.kernel_stack = stack_aligned + stack_aligned_size;
|
2016-03-26 11:04:05 -04:00
|
|
|
memcpy(&cpuregs.fpuenv, Float::fpu_initialized_regs, 512);
|
2014-03-03 18:11:13 -05:00
|
|
|
#else
|
|
|
|
#warning "You need to implement initializing the registers of the new thread"
|
|
|
|
#endif
|
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
// If the thread could not be created, make the process commit suicide
|
|
|
|
// in a manner such that we don't wait for its zombie.
|
2014-03-03 18:11:13 -05:00
|
|
|
Thread* thread = CreateKernelThread(child_process, &cpuregs);
|
2013-06-11 19:01:49 -04:00
|
|
|
if ( !thread )
|
2012-08-01 11:30:34 -04:00
|
|
|
{
|
2013-08-31 12:02:53 -04:00
|
|
|
if ( making_process )
|
|
|
|
child_process->AbortConstruction();
|
2013-06-11 19:01:49 -04:00
|
|
|
return -1;
|
2012-08-01 11:30:34 -04:00
|
|
|
}
|
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
thread->kernelstackpos = (addr_t) newkernelstack;
|
2014-03-03 18:11:13 -05:00
|
|
|
thread->kernelstacksize = newkernelstacksize;
|
2013-06-11 19:01:49 -04:00
|
|
|
thread->kernelstackmalloced = true;
|
2013-08-04 14:24:59 -04:00
|
|
|
memcpy(&thread->signal_mask, ®s.sigmask, sizeof(sigset_t));
|
|
|
|
memcpy(&thread->signal_stack, ®s.altstack, sizeof(stack_t));
|
2011-09-21 14:52:29 -04:00
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
StartKernelThread(thread);
|
2011-10-26 18:05:20 -04:00
|
|
|
|
2013-08-31 12:02:53 -04:00
|
|
|
return child_process->pid;
|
2013-06-11 19:01:49 -04:00
|
|
|
}
|
2012-08-01 11:30:34 -04:00
|
|
|
|
2014-10-16 18:04:47 -04:00
|
|
|
pid_t sys_getpid(void)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
|
|
|
return CurrentProcess()->pid;
|
|
|
|
}
|
2011-11-02 10:41:35 -04:00
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
pid_t sys_getppid(void)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
2016-11-15 15:08:41 -05:00
|
|
|
Process* process = CurrentProcess();
|
|
|
|
ScopedLock lock(&process_family_lock);
|
|
|
|
if ( !process->parent )
|
2011-11-02 10:41:35 -04:00
|
|
|
return 0;
|
2016-11-15 15:08:41 -05:00
|
|
|
return process->parent->pid;
|
2013-06-11 19:01:49 -04:00
|
|
|
}
|
2011-11-02 10:41:35 -04:00
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
pid_t sys_getpgid(pid_t pid)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
2016-11-15 15:08:41 -05:00
|
|
|
ScopedLock lock(&process_family_lock);
|
|
|
|
Process* process = !pid ? CurrentProcess() : CurrentProcess()->GetPTable()->Get(pid);
|
|
|
|
if ( !process )
|
|
|
|
return errno = ESRCH, -1;
|
|
|
|
if ( !process->group )
|
|
|
|
return errno = ESRCH, -1;
|
|
|
|
return process->group->pid;
|
2013-06-11 19:01:49 -04:00
|
|
|
}
|
2011-11-02 10:41:35 -04:00
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
pid_t sys_getsid(pid_t pid)
|
2013-06-11 19:02:01 -04:00
|
|
|
{
|
2016-11-15 15:08:41 -05:00
|
|
|
ScopedLock lock(&process_family_lock);
|
2014-03-19 18:34:09 -04:00
|
|
|
Process* process = !pid ? CurrentProcess() : CurrentProcess()->GetPTable()->Get(pid);
|
2013-06-11 19:02:01 -04:00
|
|
|
if ( !process )
|
|
|
|
return errno = ESRCH, -1;
|
2016-11-15 15:08:41 -05:00
|
|
|
if ( !process->session )
|
|
|
|
return errno = ESRCH, -1;
|
|
|
|
return process->session->pid;
|
2013-06-11 19:02:01 -04:00
|
|
|
}
|
|
|
|
|
2014-10-16 18:04:47 -04:00
|
|
|
int sys_setpgid(pid_t pid, pid_t pgid)
|
2013-06-11 19:02:01 -04:00
|
|
|
{
|
2016-11-15 15:08:41 -05:00
|
|
|
if ( pid < 0 || pgid < 0 )
|
|
|
|
return errno = EINVAL, -1;
|
|
|
|
|
2013-06-11 19:02:01 -04:00
|
|
|
// TODO: Either prevent changing the process group after an exec or provide
|
|
|
|
// a version of this system call with a flags parameter that lets you
|
|
|
|
// decide if you want this behavior. This will fix a race condition
|
|
|
|
// where the shell spawns a child and both parent and child sets the
|
|
|
|
// process group, but the child sets the process group and execve's
|
|
|
|
// and the new program image exploits this 'bug' and also changes the
|
|
|
|
// process group, and then the shell gets around to change the process
|
|
|
|
// group. This probably unlikely, but correctness over all!
|
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
Process* current_process = CurrentProcess();
|
|
|
|
|
|
|
|
ScopedLock lock(&process_family_lock);
|
|
|
|
|
2013-06-11 19:02:01 -04:00
|
|
|
// Find the processes in question.
|
2014-03-19 18:34:09 -04:00
|
|
|
Process* process = !pid ? CurrentProcess() : CurrentProcess()->GetPTable()->Get(pid);
|
2013-06-11 19:02:01 -04:00
|
|
|
if ( !process )
|
|
|
|
return errno = ESRCH, -1;
|
2014-03-19 18:34:09 -04:00
|
|
|
Process* group = !pgid ? process : CurrentProcess()->GetPTable()->Get(pgid);
|
2013-06-11 19:02:01 -04:00
|
|
|
if ( !group )
|
|
|
|
return errno = ESRCH, -1;
|
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
// The process must be this one or a direct child.
|
|
|
|
if ( process != current_process && process->parent != current_process )
|
|
|
|
return errno = EPERM, -1;
|
|
|
|
// The process must be in this session.
|
|
|
|
if ( process->session != current_process->session )
|
|
|
|
return errno = EPERM, -1;
|
|
|
|
// The new group must be in the same session as the process.
|
|
|
|
if ( group->session != process->session )
|
|
|
|
return errno = EPERM, -1;
|
|
|
|
// The process must not be a process group leader.
|
|
|
|
// TODO: Maybe POSIX actually allows this.
|
|
|
|
if ( process->groupfirst )
|
|
|
|
return errno = EPERM, -1;
|
|
|
|
// The process must not be a session leader.
|
|
|
|
if ( process->sessionfirst )
|
|
|
|
return errno = EPERM, -1;
|
|
|
|
// The group must either exist or be the process itself.
|
|
|
|
if ( !group->groupfirst && group != process )
|
|
|
|
return errno = EPERM, -1;
|
2013-06-11 19:02:01 -04:00
|
|
|
|
|
|
|
// Exit early if this is a noop.
|
|
|
|
if ( process->group == group )
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
// Remove the process from its current process group.
|
2016-11-15 15:08:41 -05:00
|
|
|
if ( process->group )
|
|
|
|
process->group->GroupRemoveMember(process);
|
2013-06-11 19:02:01 -04:00
|
|
|
|
|
|
|
// Insert the process into its new process group.
|
|
|
|
process->groupprev = NULL;
|
|
|
|
process->groupnext = group->groupfirst;
|
|
|
|
if ( group->groupfirst )
|
|
|
|
group->groupfirst->groupprev = process;
|
|
|
|
group->groupfirst = process;
|
|
|
|
process->group = group;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-15 15:08:41 -05:00
|
|
|
pid_t sys_setsid(void)
|
|
|
|
{
|
|
|
|
Process* process = CurrentProcess();
|
|
|
|
|
|
|
|
ScopedLock lock(&process_family_lock);
|
|
|
|
|
|
|
|
// Test if already a process group leader.
|
|
|
|
if ( process->group == process )
|
|
|
|
return errno = EPERM, -1;
|
|
|
|
|
|
|
|
// Remove the process from its current process group.
|
|
|
|
if ( process->group )
|
|
|
|
process->group->GroupRemoveMember(process);
|
|
|
|
|
|
|
|
// Remove the process from its current session.
|
|
|
|
if ( process->session )
|
|
|
|
process->session->SessionRemoveMember(process);
|
|
|
|
|
|
|
|
// Insert the process into its new session.
|
|
|
|
process->sessionprev = NULL;
|
|
|
|
process->sessionnext = NULL;
|
|
|
|
process->sessionfirst = process;
|
|
|
|
process->session = process;
|
|
|
|
|
|
|
|
// Insert the process into its new process group.
|
|
|
|
process->groupprev = NULL;
|
|
|
|
process->groupnext = NULL;
|
|
|
|
process->groupfirst = process;
|
|
|
|
process->group = process;
|
|
|
|
|
|
|
|
return process->pid;
|
|
|
|
}
|
|
|
|
|
2014-10-16 18:04:47 -04:00
|
|
|
size_t sys_getpagesize(void)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
|
|
|
return Page::Size();
|
|
|
|
}
|
2012-01-08 08:58:57 -05:00
|
|
|
|
2014-10-16 18:04:47 -04:00
|
|
|
mode_t sys_umask(mode_t newmask)
|
2013-06-11 19:01:49 -04:00
|
|
|
{
|
|
|
|
Process* process = CurrentProcess();
|
|
|
|
ScopedLock lock(&process->idlock);
|
|
|
|
mode_t oldmask = process->umask;
|
|
|
|
process->umask = newmask & 0666;
|
|
|
|
return oldmask;
|
|
|
|
}
|
2013-05-16 16:03:15 -04:00
|
|
|
|
2014-10-16 18:04:47 -04:00
|
|
|
mode_t sys_getumask(void)
|
2014-01-19 16:45:49 -05:00
|
|
|
{
|
|
|
|
Process* process = CurrentProcess();
|
|
|
|
ScopedLock lock(&process->idlock);
|
|
|
|
return process->umask;
|
|
|
|
}
|
|
|
|
|
2015-10-27 12:34:54 -04:00
|
|
|
static void GetAssertInfo(struct scram_assert* info,
|
|
|
|
const void* user_info_ptr)
|
|
|
|
{
|
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
struct scram_assert user_info;
|
|
|
|
if ( !CopyFromUser(&user_info, user_info_ptr, sizeof(user_info)) )
|
|
|
|
return;
|
|
|
|
info->filename = GetStringFromUser(user_info.filename);
|
|
|
|
info->line = user_info.line;
|
|
|
|
info->function = GetStringFromUser(user_info.function);
|
|
|
|
info->expression = GetStringFromUser(user_info.expression);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void FreeAssertInfo(struct scram_assert* info)
|
|
|
|
{
|
|
|
|
delete[] (char*) info->filename;
|
|
|
|
delete[] (char*) info->function;
|
|
|
|
delete[] (char*) info->expression;
|
|
|
|
}
|
|
|
|
|
|
|
|
static
|
|
|
|
void GetUndefinedBehaviorInfo(struct scram_undefined_behavior* info,
|
|
|
|
const void* user_info_ptr)
|
|
|
|
{
|
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
struct scram_undefined_behavior user_info;
|
|
|
|
if ( !CopyFromUser(&user_info, user_info_ptr, sizeof(user_info)) )
|
|
|
|
return;
|
|
|
|
info->filename = GetStringFromUser(user_info.filename);
|
|
|
|
info->line = user_info.line;
|
|
|
|
info->column = user_info.column;
|
|
|
|
info->violation = GetStringFromUser(user_info.violation);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void FreeUndefinedBehaviorInfo(struct scram_undefined_behavior* info)
|
|
|
|
{
|
|
|
|
delete[] info->filename;
|
|
|
|
delete[] info->violation;
|
|
|
|
}
|
|
|
|
|
|
|
|
__attribute__((noreturn))
|
|
|
|
void sys_scram(int event, const void* user_info)
|
|
|
|
{
|
|
|
|
Process* process = CurrentProcess();
|
|
|
|
// TODO: Prohibit execve such that program_image_path is protected.
|
|
|
|
process->ExitThroughSignal(SIGABRT);
|
|
|
|
if ( event == SCRAM_ASSERT )
|
|
|
|
{
|
|
|
|
struct scram_assert info;
|
|
|
|
GetAssertInfo(&info, user_info);
|
|
|
|
Log::PrintF("%s[%ji]: Assertion failure: %s:%lu: %s: %s\n",
|
|
|
|
process->program_image_path,
|
|
|
|
(intmax_t) process->pid,
|
|
|
|
info.filename ? info.filename : "<unknown>",
|
|
|
|
info.line,
|
|
|
|
info.function ? info.function : "<unknown>",
|
|
|
|
info.expression ? info.expression : "<unknown>");
|
|
|
|
FreeAssertInfo(&info);
|
|
|
|
}
|
|
|
|
else if ( event == SCRAM_STACK_SMASH )
|
|
|
|
{
|
|
|
|
Log::PrintF("%s[%ji]: Stack smashing detected\n",
|
|
|
|
process->program_image_path,
|
|
|
|
(intmax_t) process->pid);
|
|
|
|
}
|
|
|
|
else if ( event == SCRAM_UNDEFINED_BEHAVIOR )
|
|
|
|
{
|
|
|
|
struct scram_undefined_behavior info;
|
|
|
|
GetUndefinedBehaviorInfo(&info, user_info);
|
|
|
|
Log::PrintF("%s[%ji]: Undefined behavior: %s at %s:%lu:%lu\n",
|
|
|
|
process->program_image_path,
|
|
|
|
(intmax_t) process->pid,
|
|
|
|
info.violation ? info.violation : "<unknown>",
|
|
|
|
info.filename ? info.filename : "<unknown>",
|
|
|
|
info.line,
|
|
|
|
info.column);
|
|
|
|
FreeUndefinedBehaviorInfo(&info);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
Log::PrintF("%s[%ji]: Unknown scram event %i\n",
|
|
|
|
process->program_image_path,
|
|
|
|
(intmax_t) process->pid,
|
|
|
|
event);
|
|
|
|
}
|
2016-05-13 19:14:26 -04:00
|
|
|
// TODO: Allow debugging this event (and see signal.cpp sigreturn).
|
2015-10-27 12:34:54 -04:00
|
|
|
kthread_exit();
|
|
|
|
}
|
|
|
|
|
2013-06-11 19:01:49 -04:00
|
|
|
} // namespace Sortix
|