/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#include <AK/ScopeGuard.h>
#include <AK/StringBuilder.h>
#include <AK/Time.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/KSyms.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/Process.h>
#include <Kernel/ProcessExposed.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/signal_numbers.h>
namespace Kernel {

SpinLock<u8> Thread::g_tid_map_lock;
READONLY_AFTER_INIT HashMap<ThreadID, Thread*>* Thread::g_tid_map;

UNMAP_AFTER_INIT void Thread::initialize()
{
    g_tid_map = new HashMap<ThreadID, Thread*>();
}

KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
{
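    // NOTE: All resources a Thread needs (FPU state, kernel stack and block timer)
    // are allocated up front here, so any allocation failure is reported as ENOMEM
    // before the Thread object itself is constructed.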
    auto fpu_state = try_make<FPUState>();
    if (!fpu_state)
        return ENOMEM;

    auto kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
    if (!kernel_stack_region)
        return ENOMEM;
    kernel_stack_region->set_stack(true);

    auto block_timer = AK::try_create<Timer>();
    if (!block_timer)
        return ENOMEM;

    auto thread = adopt_ref_if_nonnull(new (nothrow) Thread(move(process), kernel_stack_region.release_nonnull(), block_timer.release_nonnull(), fpu_state.release_nonnull()));
    if (!thread)
        return ENOMEM;
    return thread.release_nonnull();
}

Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stack_region, NonnullRefPtr<Timer> block_timer, NonnullOwnPtr<FPUState> fpu_state)
    : m_process(move(process))
    , m_kernel_stack_region(move(kernel_stack_region))
    , m_fpu_state(move(fpu_state))
    , m_name(m_process->name())
    , m_block_timer(block_timer)
    , m_global_procfs_inode_index(ProcFSComponentRegistry::the().allocate_inode_index())
{
    bool is_first_thread = m_process->add_thread(*this);
    if (is_first_thread) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }

    {
        // FIXME: Go directly to KString
        auto string = String::formatted("Kernel stack (thread {})", m_tid.value());
        m_kernel_stack_region->set_name(KString::try_create(string));
    }

    {
        ScopedSpinLock lock(g_tid_map_lock);
        auto result = g_tid_map->set(m_tid, this);
        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
    }

    if constexpr (THREAD_DEBUG)
        dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());

    reset_fpu_state();

#if ARCH(I386)
    // Only IF is set when a process boots.
    m_regs.eflags = 0x0202;

    if (m_process->is_kernel_process()) {
        m_regs.cs = GDT_SELECTOR_CODE0;
        m_regs.ds = GDT_SELECTOR_DATA0;
        m_regs.es = GDT_SELECTOR_DATA0;
        m_regs.fs = 0;
        m_regs.ss = GDT_SELECTOR_DATA0;
        m_regs.gs = GDT_SELECTOR_PROC;
    } else {
        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
        m_regs.ds = GDT_SELECTOR_DATA3 | 3;
        m_regs.es = GDT_SELECTOR_DATA3 | 3;
        m_regs.fs = GDT_SELECTOR_DATA3 | 3;
        m_regs.ss = GDT_SELECTOR_DATA3 | 3;
        m_regs.gs = GDT_SELECTOR_TLS | 3;
    }
#else
    // Only IF is set when a process boots.
    m_regs.rflags = 0x0202;

    if (m_process->is_kernel_process())
        m_regs.cs = GDT_SELECTOR_CODE0;
    else
        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
#endif

    m_regs.cr3 = m_process->space().page_directory().cr3();

    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process->is_kernel_process()) {
#if ARCH(I386)
        m_regs.esp = m_regs.esp0 = m_kernel_stack_top;
#else
        m_regs.rsp = m_regs.rsp0 = m_kernel_stack_top;
#endif
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
#if ARCH(I386)
        m_regs.ss0 = GDT_SELECTOR_DATA0;
        m_regs.esp0 = m_kernel_stack_top;
#else
        m_regs.rsp0 = m_kernel_stack_top;
#endif
    }

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();
}

Thread::~Thread()
{
    {
        // We need to explicitly remove ourselves from the thread list
        // here. We may get pre-empted in the middle of destructing this
        // thread, which causes problems if the thread list is iterated.
        // Specifically, if this is the last thread of a process, checking
        // block conditions would access m_process, which would be in
        // the middle of being destroyed.
        ScopedSpinLock lock(g_scheduler_lock);
        VERIFY(!m_process_thread_list_node.is_in_list());

        // We shouldn't be queued
        VERIFY(m_runnable_priority < 0);
    }
    {
        ScopedSpinLock lock(g_tid_map_lock);
        auto result = g_tid_map->remove(m_tid);
        VERIFY(result);
    }
}

void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock, u32 lock_count)
{
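    // NOTE: This is the Mutex blocking path (as opposed to Blocker-based blocking).
    // We remember the contended Mutex and its requested lock count, move to the
    // Blocked state, and keep yielding until unblock_from_lock() clears
    // m_blocking_lock.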
    VERIFY(!Processor::current().in_irq());
    VERIFY(this == Thread::current());
    ScopedCritical critical;
    VERIFY(!s_mm_lock.own_lock());

    ScopedSpinLock block_lock(m_block_lock);
    VERIFY(!m_in_block);
    m_in_block = true;

    ScopedSpinLock scheduler_lock(g_scheduler_lock);

    switch (state()) {
    case Thread::Stopped:
        // It's possible that we were requested to be stopped!
        break;
    case Thread::Running:
        VERIFY(m_blocker == nullptr);
        break;
    default:
        VERIFY_NOT_REACHED();
    }

    VERIFY(!m_blocking_lock);
    m_blocking_lock = &lock;
    m_lock_requested_count = lock_count;

    set_state(Thread::Blocked);

    scheduler_lock.unlock();
    block_lock.unlock();

    lock_lock.unlock();

    dbgln_if(THREAD_DEBUG, "Thread {} blocking on Mutex {}", *this, &lock);

    auto& big_lock = process().big_lock();
    for (;;) {
        // Yield to the scheduler, and wait for us to resume unblocked.
        VERIFY(!g_scheduler_lock.own_lock());
        VERIFY(Processor::current().in_critical());
        if (&lock != &big_lock && big_lock.own_lock()) {
            // We're locking another lock and already hold the big lock...
            // We need to release the big lock
            yield_and_release_relock_big_lock();
        } else {
            yield_assuming_not_holding_big_lock();
        }
        VERIFY(Processor::current().in_critical());

        ScopedSpinLock block_lock2(m_block_lock);
        if (should_be_stopped() || state() == Stopped) {
            dbgln("Thread should be stopped, current state: {}", state_string());
            set_state(Thread::Blocked);
            continue;
        }

        VERIFY(!m_blocking_lock);
        VERIFY(m_in_block);
        m_in_block = false;
        break;
    }

    lock_lock.lock();
}

u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
{
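    // Wake a thread that block()ed on the given Mutex and hand back the lock count
    // it originally requested. If we're in an IRQ handler, the state change is
    // queued as a deferred call.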
    ScopedSpinLock block_lock(m_block_lock);
    VERIFY(m_blocking_lock == &lock);
    auto requested_count = m_lock_requested_count;
    block_lock.unlock();

    auto do_unblock = [&]() {
        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        ScopedSpinLock block_lock(m_block_lock);
        VERIFY(m_blocking_lock == &lock);
        VERIFY(!Processor::current().in_irq());
        VERIFY(g_scheduler_lock.own_lock());
        VERIFY(m_block_lock.own_lock());
        VERIFY(m_blocking_lock == &lock);
        dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
        m_blocking_lock = nullptr;
        if (Thread::current() == this) {
            set_state(Thread::Running);
            return;
        }
        VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
        set_state(Thread::Runnable);
    };
    if (Processor::current().in_irq()) {
        Processor::current().deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
            if (auto this_thread = self.strong_ref())
                do_unblock();
        });
    } else {
        do_unblock();
    }
    return requested_count;
}

void Thread::unblock_from_blocker(Blocker& blocker)
{
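    // Only unblock if we are still blocked on this exact Blocker and not stopped;
    // defer the work when called from an IRQ handler.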
    auto do_unblock = [&]() {
        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        ScopedSpinLock block_lock(m_block_lock);
        if (m_blocker != &blocker)
            return;
        if (!should_be_stopped() && !is_stopped())
            unblock();
    };
    if (Processor::current().in_irq()) {
        Processor::current().deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
            if (auto this_thread = self.strong_ref())
                do_unblock();
        });
    } else {
        do_unblock();
    }
}

void Thread::unblock(u8 signal)
{
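    // Callers hold both g_scheduler_lock and m_block_lock (see the VERIFYs below).
    // Threads blocked on a Mutex are woken through unblock_from_lock() instead.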
    VERIFY(!Processor::current().in_irq());
    VERIFY(g_scheduler_lock.own_lock());
    VERIFY(m_block_lock.own_lock());
    if (m_state != Thread::Blocked)
        return;
    if (m_blocking_lock)
        return;
    VERIFY(m_blocker);
    if (signal != 0) {
        if (is_handling_page_fault()) {
            // Don't let signals unblock threads that are blocked inside a page fault handler.
            // This prevents threads from EINTR'ing the inode read in an inode page fault.
            // FIXME: There's probably a better way to solve this.
            return;
        }
        if (!m_blocker->can_be_interrupted() && !m_should_die)
            return;
        m_blocker->set_interrupted_by_signal(signal);
    }
    m_blocker = nullptr;
    if (Thread::current() == this) {
        set_state(Thread::Running);
        return;
    }
    VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}

void Thread::set_should_die()
{
    if (m_should_die) {
        dbgln("{} Should already die", *this);
        return;
    }
    ScopedCritical critical;

    // Remember that we should die instead of returning to
    // the userspace.
    ScopedSpinLock lock(g_scheduler_lock);
    m_should_die = true;

    // NOTE: Even the current thread can technically be in "Stopped"
    // state! This is the case when another thread sent a SIGSTOP to
    // it while it was running and it calls e.g. exit() before
    // the scheduler gets involved again.
    if (is_stopped()) {
        // If we were stopped, we need to briefly resume so that
        // the kernel stacks can clean up. We won't ever return back
        // to user mode, though
        VERIFY(!process().is_stopped());
        resume_from_stopped();
    }
    if (is_blocked()) {
        ScopedSpinLock block_lock(m_block_lock);
        if (m_blocker) {
            // We're blocked in the kernel.
            m_blocker->set_interrupted_by_death();
            unblock();
        }
    }
}

void Thread::die_if_needed()
{
    VERIFY(Thread::current() == this);

    if (!m_should_die)
        return;
    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);

    dbgln_if(THREAD_DEBUG, "Thread {} is dying", *this);

    {
        ScopedSpinLock lock(g_scheduler_lock);
        // It's possible that we don't reach the code after this block if the
        // scheduler is invoked and FinalizerTask cleans up this thread, however
        // that doesn't matter because we're trying to invoke the scheduler anyway
        set_state(Thread::Dying);
    }

    ScopedCritical critical;

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch
    u32 prev_flags;
    Processor::current().clear_critical(prev_flags, false);
    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again
    VERIFY_NOT_REACHED();
}

void Thread::exit(void* exit_value)
{
    VERIFY(Thread::current() == this);
    m_join_condition.thread_did_exit(exit_value);
    set_should_die();
    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
    if (m_thread_specific_range.has_value()) {
        auto* region = process().space().find_region_from_range(m_thread_specific_range.value());
        process().space().deallocate_region(*region);
    }
    die_if_needed();
}

void Thread::yield_assuming_not_holding_big_lock()
{
    VERIFY(!g_scheduler_lock.own_lock());
    VERIFY(!process().big_lock().own_lock());
    // Disable interrupts here. This ensures we don't accidentally switch contexts twice
    InterruptDisabler disable;
    Scheduler::yield(); // flag a switch
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
    // NOTE: We may be on a different CPU now!
    Processor::current().restore_critical(prev_crit, prev_flags);
}

void Thread::yield_and_release_relock_big_lock()
{
    VERIFY(!g_scheduler_lock.own_lock());
    // Disable interrupts here. This ensures we don't accidentally switch contexts twice
    InterruptDisabler disable;
    Scheduler::yield(); // flag a switch
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    relock_process(previous_locked, lock_count_to_restore);
}

LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
{
    return process().big_lock().force_unlock_if_locked(lock_count_to_restore);
}

void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::yield above.
    // We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);

    // CONTEXT SWITCH HAPPENS HERE!

    // NOTE: We may be on a different CPU now!
    Processor::current().restore_critical(prev_crit, prev_flags);

    if (previous_locked != LockMode::Unlocked) {
        // We've unblocked, relock the process if needed and carry on.
        process().big_lock().restore_lock(previous_locked, lock_count_to_restore);
    }
}

auto Thread::sleep(clockid_t clock_id, const Time& duration, Time* remaining_time) -> BlockResult
{
    VERIFY(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(false, &duration, nullptr, clock_id), remaining_time);
}

auto Thread::sleep_until(clockid_t clock_id, const Time& deadline) -> BlockResult
{
    VERIFY(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(true, &deadline, nullptr, clock_id));
}

const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Blocked: {
        ScopedSpinLock block_lock(m_block_lock);
        if (m_blocking_lock)
            return "Mutex";
        if (m_blocker)
            return m_blocker->state_string();
        VERIFY_NOT_REACHED();
    }
    }
    PANIC("Thread::state_string(): Invalid state: {}", (int)state());
}

void Thread::finalize()
{
    VERIFY(Thread::current() == g_finalizer);
    VERIFY(Thread::current() != this);

#if LOCK_DEBUG
    VERIFY(!m_lock.own_lock());
    if (lock_count() > 0) {
        dbgln("Thread {} leaking {} Locks!", *this, lock_count());
        ScopedSpinLock list_lock(m_holding_locks_lock);
        for (auto& info : m_holding_locks_list) {
            const auto& location = info.source_location;
            dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count);
        }
        VERIFY_NOT_REACHED();
    }
#endif

    {
        ScopedSpinLock lock(g_scheduler_lock);
        dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this);
        set_state(Thread::State::Dead);
        m_join_condition.thread_finalizing();
    }

    if (m_dump_backtrace_on_finalization)
        dbgln("{}", backtrace());

    drop_thread_count(false);
}

void Thread::drop_thread_count(bool initializing_first_thread)
{
    bool is_last = process().remove_thread(*this);
    if (!initializing_first_thread && is_last)
        process().finalize();
}

void Thread::finalize_dying_threads()
{
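    // Collect the finalizable threads under the scheduler lock first, then
    // finalize them without holding it, since finalize() takes that lock itself.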
    VERIFY(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        ScopedSpinLock lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (thread.is_finalizable())
                dying_threads.append(&thread);
        });
    }
    for (auto* thread : dying_threads) {
        RefPtr<Process> process = thread->process();
        dbgln_if(PROCESS_DEBUG, "Before finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        thread->finalize();
        dbgln_if(PROCESS_DEBUG, "After finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        // This thread will never execute again, drop the running reference
        // NOTE: This may not necessarily drop the last reference if anything
        // else is still holding onto this thread!
        thread->unref();
    }
}

bool Thread::tick()
{
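    // Account this tick to kernel or user time for both the thread and its
    // process, and report whether any of the time slice is left.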
    if (previous_mode() == PreviousMode::KernelMode) {
        ++m_process->m_ticks_in_kernel;
        ++m_ticks_in_kernel;
    } else {
        ++m_process->m_ticks_in_user;
        ++m_ticks_in_user;
    }
    return --m_ticks_left;
}

void Thread::check_dispatch_pending_signal()
{
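    // Dispatch one pending signal (if any) for the current state, then yield
    // if the dispatch result asks for it.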
    auto result = DispatchSignalResult::Continue;
    {
        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        if (pending_signals_for_state()) {
            ScopedSpinLock lock(m_lock);
            result = dispatch_one_pending_signal();
        }
    }

    switch (result) {
    case DispatchSignalResult::Yield:
        yield_assuming_not_holding_big_lock();
        break;
    default:
        break;
    }
}

u32 Thread::pending_signals() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return pending_signals_for_state();
}

u32 Thread::pending_signals_for_state() const
{
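    // While stopped, only the signals that can get the thread going again
    // (SIGCONT, SIGKILL and SIGTRAP) count as pending; the rest stay queued
    // until the thread resumes.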
    VERIFY(g_scheduler_lock.own_lock());
    constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
    if (is_handling_page_fault())
        return 0;
    return m_state != Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
}

void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    VERIFY(signal < 32);
    ScopedSpinLock scheduler_lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbgln_if(SIGNAL_DEBUG, "Signal {} was ignored by {}", signal, process());
        return;
    }

    if constexpr (SIGNAL_DEBUG) {
        if (sender)
            dbgln("Signal: {} sent {} to {}", *sender, signal, process());
        else
            dbgln("Signal: Kernel send {} to {}", signal, process());
    }

    m_pending_signals |= 1 << (signal - 1);
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);

    if (m_state == Stopped) {
        ScopedSpinLock lock(m_lock);
        if (pending_signals_for_state()) {
            dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal);
            resume_from_stopped();
        }
    } else {
        ScopedSpinLock block_lock(m_block_lock);
        dbgln_if(SIGNAL_DEBUG, "Signal: Unblocking {} to deliver signal {}", *this, signal);
        unblock(signal);
    }
}

u32 Thread::update_signal_mask(u32 signal_mask)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

u32 Thread::signal_mask() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_signal_mask;
}

u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    if (block)
        m_signal_mask &= ~signal_set;
    else
        m_signal_mask |= signal_set;
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

void Thread::clear_signals()
{
    ScopedSpinLock lock(g_scheduler_lock);
    m_signal_mask = 0;
    m_pending_signals = 0;
    m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
    m_signal_action_data.fill({});
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    VERIFY(Thread::current() == this);
    DispatchSignalResult result;
    {
        ScopedSpinLock lock(g_scheduler_lock);
        result = dispatch_signal(signal);
    }
    if (result == DispatchSignalResult::Yield)
        yield_and_release_relock_big_lock();
}

DispatchSignalResult Thread::dispatch_one_pending_signal()
{
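    // Pick the lowest-numbered pending signal that isn't masked and dispatch it.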
    VERIFY(m_lock.own_lock());
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (signal_candidates == 0)
        return DispatchSignalResult::Continue;

    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}

DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{
    VERIFY(signal != 0);
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    ScopedSpinLock lock(m_lock);
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (!(signal_candidates & (1 << (signal - 1))))
        return DispatchSignalResult::Continue;
    return dispatch_signal(signal);
}

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

static DefaultSignalAction default_signal_action(u8 signal)
{
    VERIFY(signal && signal < NSIG);
    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    VERIFY_NOT_REACHED();
}

bool Thread::should_ignore_signal(u8 signal) const
{
    VERIFY(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
        return true;
    return false;
}

bool Thread::has_signal_handler(u8 signal) const
{
    VERIFY(signal < 32);
    auto& action = m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}

static bool push_value_on_user_stack(FlatPtr* stack, FlatPtr data)
{
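    // Grow the user stack downwards by one word and copy the value out to it.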
    *stack -= sizeof(FlatPtr);
    return copy_to_user((FlatPtr*)*stack, &data);
}

void Thread::resume_from_stopped()
{
    VERIFY(is_stopped());
    VERIFY(m_stop_state != State::Invalid);
    VERIFY(g_scheduler_lock.own_lock());
    if (m_stop_state == Blocked) {
        ScopedSpinLock block_lock(m_block_lock);
        if (m_blocker || m_blocking_lock) {
            // Hasn't been unblocked yet
            set_state(Blocked, 0);
        } else {
            // Was unblocked while stopped
            set_state(Runnable);
        }
    } else {
        set_state(m_stop_state, 0);
    }
}

DispatchSignalResult Thread::dispatch_signal(u8 signal)
{
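    // Depending on the signal and thread state this either defers delivery,
    // stops the thread, applies the default action, or rewrites the saved
    // userspace register state so the thread resumes in the signal trampoline
    // with the handler and old context on its stack.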
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(g_scheduler_lock.own_lock());
    VERIFY(signal > 0 && signal <= 32);
    VERIFY(process().is_user_process());
    VERIFY(this == Thread::current());

    dbgln_if(SIGNAL_DEBUG, "Dispatch signal {} to {}, state: {}", signal, *this, state_string());

    if (m_state == Invalid || !is_initialized()) {
        // Thread has barely been created, we need to wait until it is
        // at least in Runnable state and is_initialized() returns true,
        // which indicates that it is fully set up and we actually have
        // a register state on the stack that we can modify
        return DispatchSignalResult::Deferred;
    }

    VERIFY(previous_mode() == PreviousMode::UserMode);

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    VERIFY(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto& process = this->process();
    auto tracer = process.tracer();
    if (signal == SIGSTOP || (tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
        dbgln_if(SIGNAL_DEBUG, "Signal {} stopping this thread", signal);
        set_state(State::Stopped, signal);
        return DispatchSignalResult::Yield;
    }

    if (signal == SIGCONT) {
        dbgln("signal: SIGCONT resuming {}", *this);
    } else {
        if (tracer) {
            // when a thread is traced, it should be stopped whenever it receives a signal
            // the tracer is notified of this by using waitpid()
            // only "pending signals" from the tracer are sent to the tracee
            if (!tracer->has_pending_signal(signal)) {
                dbgln("signal: {} stopping {} for tracer", signal, *this);
                set_state(Stopped, signal);
                return DispatchSignalResult::Yield;
            }
            tracer->unset_signal(signal);
        }
    }
2019-06-07 10:56:50 +00:00
auto handler_vaddr = action . handler_or_sigaction ;
if ( handler_vaddr . is_null ( ) ) {
2019-03-23 21:03:17 +00:00
switch ( default_signal_action ( signal ) ) {
case DefaultSignalAction : : Stop :
2020-12-09 04:18:45 +00:00
set_state ( Stopped , signal ) ;
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Yield ;
2019-08-06 17:43:07 +00:00
case DefaultSignalAction : : DumpCore :
2020-11-06 08:09:51 +00:00
process . set_dump_core ( true ) ;
2020-12-09 04:18:45 +00:00
process . for_each_thread ( [ ] ( auto & thread ) {
2019-08-06 17:43:07 +00:00
thread . set_dump_backtrace_on_finalization ( ) ;
} ) ;
2019-07-25 19:02:19 +00:00
[[fallthrough]] ;
2019-03-23 21:03:17 +00:00
case DefaultSignalAction : : Terminate :
2020-08-02 02:04:56 +00:00
m_process - > terminate_due_to_signal ( signal ) ;
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Terminate ;
2019-03-23 21:03:17 +00:00
case DefaultSignalAction : : Ignore :
2021-02-23 19:42:32 +00:00
VERIFY_NOT_REACHED ( ) ;
2019-03-23 21:03:17 +00:00
case DefaultSignalAction : : Continue :
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Continue ;
2019-03-23 21:03:17 +00:00
}
2021-02-23 19:42:32 +00:00
VERIFY_NOT_REACHED ( ) ;
2019-03-23 21:03:17 +00:00
}

    if (handler_vaddr.as_ptr() == SIG_IGN) {
        dbgln_if(SIGNAL_DEBUG, "Ignored signal {}", signal);
        return DispatchSignalResult::Continue;
    }

    VERIFY(previous_mode() == PreviousMode::UserMode);
    VERIFY(current_trap());

    ProcessPagingScope paging_scope(m_process);

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
2020-09-07 14:31:00 +00:00
auto setup_stack = [ & ] ( RegisterState & state ) {
2021-02-25 15:18:36 +00:00
# if ARCH(I386)
FlatPtr * stack = & state . userspace_esp ;
FlatPtr old_esp = * stack ;
FlatPtr ret_eip = state . eip ;
FlatPtr ret_eflags = state . eflags ;
2021-06-23 19:54:41 +00:00
dbgln_if ( SIGNAL_DEBUG , " Setting up user stack to return to EIP {:p}, ESP {:p} " , ret_eip , old_esp ) ;
2021-03-04 16:50:05 +00:00
# elif ARCH(X86_64)
2021-06-26 12:56:28 +00:00
FlatPtr * stack = & state . userspace_rsp ;
2021-06-29 08:31:25 +00:00
FlatPtr old_rsp = * stack ;
FlatPtr ret_rip = state . rip ;
FlatPtr ret_rflags = state . rflags ;
dbgln_if ( SIGNAL_DEBUG , " Setting up user stack to return to RIP {:p}, RSP {:p} " , ret_rip , old_rsp ) ;
2021-03-04 16:50:05 +00:00
# endif
2019-11-04 08:29:47 +00:00
2021-02-25 15:18:36 +00:00
# if ARCH(I386)
2019-11-04 08:29:47 +00:00
// Align the stack to 16 bytes.
// Note that we push 56 bytes (4 * 14) on to the stack,
// so we need to account for this here.
2021-07-01 22:57:48 +00:00
// 56 % 16 = 8, so we only need to take 8 bytes into consideration for
// the stack alignment.
FlatPtr stack_alignment = ( * stack - 8 ) % 16 ;
2019-11-04 08:29:47 +00:00
* stack - = stack_alignment ;
push_value_on_user_stack ( stack , ret_eflags ) ;
push_value_on_user_stack ( stack , ret_eip ) ;
push_value_on_user_stack ( stack , state . eax ) ;
push_value_on_user_stack ( stack , state . ecx ) ;
push_value_on_user_stack ( stack , state . edx ) ;
push_value_on_user_stack ( stack , state . ebx ) ;
push_value_on_user_stack ( stack , old_esp ) ;
push_value_on_user_stack ( stack , state . ebp ) ;
push_value_on_user_stack ( stack , state . esi ) ;
push_value_on_user_stack ( stack , state . edi ) ;
2021-06-23 19:54:41 +00:00
# else
2021-06-29 08:31:25 +00:00
// Align the stack to 16 bytes.
// Note that we push 176 bytes (8 * 22) on to the stack,
// so we need to account for this here.
2021-07-01 22:57:48 +00:00
// 22 % 2 = 0, so we dont need to take anything into consideration
// for the alignment.
// We also are not allowed to touch the thread's red-zone of 128 bytes
FlatPtr stack_alignment = * stack % 16 ;
* stack - = 128 + stack_alignment ;
2021-06-29 08:31:25 +00:00
push_value_on_user_stack ( stack , ret_rflags ) ;
push_value_on_user_stack ( stack , ret_rip ) ;
push_value_on_user_stack ( stack , state . r15 ) ;
push_value_on_user_stack ( stack , state . r14 ) ;
push_value_on_user_stack ( stack , state . r13 ) ;
push_value_on_user_stack ( stack , state . r12 ) ;
push_value_on_user_stack ( stack , state . r11 ) ;
push_value_on_user_stack ( stack , state . r10 ) ;
push_value_on_user_stack ( stack , state . r9 ) ;
push_value_on_user_stack ( stack , state . r8 ) ;
push_value_on_user_stack ( stack , state . rax ) ;
push_value_on_user_stack ( stack , state . rcx ) ;
push_value_on_user_stack ( stack , state . rdx ) ;
push_value_on_user_stack ( stack , state . rbx ) ;
push_value_on_user_stack ( stack , old_rsp ) ;
push_value_on_user_stack ( stack , state . rbp ) ;
push_value_on_user_stack ( stack , state . rsi ) ;
push_value_on_user_stack ( stack , state . rdi ) ;
2021-02-25 15:18:36 +00:00
# endif
2019-11-04 08:29:47 +00:00
// PUSH old_signal_mask
push_value_on_user_stack ( stack , old_signal_mask ) ;
push_value_on_user_stack ( stack , signal ) ;
push_value_on_user_stack ( stack , handler_vaddr . get ( ) ) ;
push_value_on_user_stack ( stack , 0 ) ; //push fake return address
2021-02-23 19:42:32 +00:00
VERIFY ( ( * stack % 16 ) = = 0 ) ;
2019-11-04 08:29:47 +00:00
} ;

    // We now place the thread state on the userspace stack.
    // Note that we use a RegisterState.
    // Conversely, when the thread isn't blocking the RegisterState may not be
    // valid (fork, exec etc) but the tss will, so we use that instead.
    auto& regs = get_register_dump_from_stack();
    setup_stack(regs);
    auto signal_trampoline_addr = process.signal_trampoline().get();
#if ARCH(I386)
    regs.eip = signal_trampoline_addr;
#else
    regs.rip = signal_trampoline_addr;
#endif

#if ARCH(I386)
    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_regs.cs, m_regs.eip, signal);
#else
    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:16x} to deliver {}", state_string(), m_regs.cs, m_regs.rip, signal);
#endif

    return DispatchSignalResult::Continue;
}

RegisterState& Thread::get_register_dump_from_stack()
{
    auto* trap = current_trap();

    // We should *always* have a trap. If we don't we're probably a kernel
    // thread that hasn't been pre-empted. If we want to support this, we
    // need to capture the registers probably into m_regs and return it
    VERIFY(trap);

    while (trap) {
        if (!trap->next_trap)
            break;
        trap = trap->next_trap;
    }

    return *trap->regs;
}

RefPtr<Thread> Thread::clone(Process& process)
{
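    // Create a fresh thread in the target process and copy over this thread's
    // signal dispositions, signal mask, FPU state and TLS pointer.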
    auto thread_or_error = Thread::try_create(process);
    if (thread_or_error.is_error())
        return {};
    auto& clone = thread_or_error.value();
    auto signal_action_data_span = m_signal_action_data.span();
    signal_action_data_span.copy_to(clone->m_signal_action_data.span());
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_thread_specific_data = m_thread_specific_data;
    return clone;
}

void Thread::set_state(State new_state, u8 stop_signal)
{
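    // NOTE: Transitions into and out of Stopped are propagated to the other
    // threads of the process, and the parent is notified via SIGCHLD.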
    State previous_state;
    VERIFY(g_scheduler_lock.own_lock());
    if (new_state == m_state)
        return;

    {
        ScopedSpinLock thread_lock(m_lock);
        previous_state = m_state;
        if (previous_state == Invalid) {
            // If we were *just* created, we may already have pending signals
            if (has_unmasked_pending_signals()) {
                dbgln_if(THREAD_DEBUG, "Dispatch pending signals to new thread {}", *this);
                dispatch_one_pending_signal();
            }
        }

        m_state = new_state;
        dbgln_if(THREAD_DEBUG, "Set thread {} state to {}", *this, state_string());
    }

    if (previous_state == Runnable) {
        Scheduler::dequeue_runnable_thread(*this);
    } else if (previous_state == Stopped) {
        m_stop_state = State::Invalid;
        auto& process = this->process();
        if (process.set_stopped(false) == true) {
            process.for_each_thread([&](auto& thread) {
                if (&thread == this)
                    return;
                if (!thread.is_stopped())
                    return;
                dbgln_if(THREAD_DEBUG, "Resuming peer thread {}", thread);
                thread.resume_from_stopped();
            });
            process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Continued);
            // Tell the parent process (if any) about this change.
            if (auto parent = Process::from_pid(process.ppid())) {
                [[maybe_unused]] auto result = parent->send_signal(SIGCHLD, &process);
            }
        }
    }
2020-10-28 22:06:16 +00:00
if ( m_state = = Runnable ) {
2021-01-22 23:56:08 +00:00
Scheduler : : queue_runnable_thread ( * this ) ;
2020-10-28 22:06:16 +00:00
Processor : : smp_wake_n_idle_processors ( 1 ) ;
} else if ( m_state = = Stopped ) {
2020-11-29 23:05:27 +00:00
// We don't want to restore to Running state, only Runnable!
2020-12-09 04:18:45 +00:00
m_stop_state = previous_state ! = Running ? previous_state : Runnable ;
auto & process = this - > process ( ) ;
if ( process . set_stopped ( true ) = = false ) {
process . for_each_thread ( [ & ] ( auto & thread ) {
2021-05-16 09:36:52 +00:00
if ( & thread = = this )
return ;
if ( thread . is_stopped ( ) )
return ;
2021-02-07 12:03:24 +00:00
dbgln_if ( THREAD_DEBUG , " Stopping peer thread {} " , thread ) ;
2020-12-09 04:18:45 +00:00
thread . set_state ( Stopped , stop_signal ) ;
} ) ;
process . unblock_waiters ( Thread : : WaitBlocker : : UnblockFlags : : Stopped , stop_signal ) ;
2021-03-29 22:12:51 +00:00
// Tell the parent process (if any) about this change.
if ( auto parent = Process : : from_pid ( process . ppid ( ) ) ) {
[[maybe_unused]] auto result = parent - > send_signal ( SIGCHLD , & process ) ;
}
2020-12-09 04:18:45 +00:00
}
2020-11-29 23:05:27 +00:00
} else if ( m_state = = Dying ) {
2021-02-23 19:42:32 +00:00
VERIFY ( previous_state ! = Blocked ) ;
2020-08-06 01:13:28 +00:00
if ( this ! = Thread : : current ( ) & & is_finalizable ( ) ) {
// Some other thread set this thread to Dying, notify the
// finalizer right away as it can be cleaned up now
Scheduler : : notify_finalizer ( ) ;
}
2020-07-05 20:32:07 +00:00
}
2019-04-17 10:41:51 +00:00
}
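
// A single backtrace frame: the raw address plus the kernel symbol it resolved
// to, if any (user-space frames and unknown addresses leave symbol null).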
struct RecognizedSymbol {
    FlatPtr address;
    const KernelSymbol* symbol { nullptr };
};

static bool symbolicate(RecognizedSymbol const& symbol, Process& process, StringBuilder& builder)
{
    if (!symbol.address)
        return false;
    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.symbol) {
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            if (auto* region = process.space().find_region_containing({ VirtualAddress(symbol.address), sizeof(FlatPtr) })) {
                size_t offset = symbol.address - region->vaddr().get();
                if (auto region_name = region->name(); !region_name.is_null() && !region_name.is_empty())
                    builder.appendff("{:p} {} + 0x{:x}\n", (void*)symbol.address, region_name, offset);
                else
                    builder.appendff("{:p} {:p} + 0x{:x}\n", (void*)symbol.address, region->vaddr().as_ptr(), offset);
            } else {
                builder.appendff("{:p}\n", symbol.address);
            }
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096) {
        builder.appendff("{:p}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address));
    } else {
        builder.appendff("{:p} {} + 0x{:x}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address), symbol.symbol->name, offset);
    }
    return true;
}
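
// Captures this thread's stack trace and renders one "address symbol + offset"
// line per frame; kernel addresses are masked (printed as 0xdeadc0de) for
// processes without superuser privileges. An illustrative (made-up) line:
//
//     0xc012abcd Scheduler::yield() + 0x32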
String Thread::backtrace()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;
    auto& process = const_cast<Process&>(this->process());
    auto stack_trace = Processor::capture_stack_trace(*this);
    VERIFY(!g_scheduler_lock.own_lock());
    ProcessPagingScope paging_scope(process);
    for (auto& frame : stack_trace) {
        if (is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
            recognized_symbols.append({ frame });
        } else {
            recognized_symbols.append({ frame, symbolicate_kernel_address(frame) });
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!symbolicate(symbol, process, builder))
            break;
    }
    return builder.to_string();
}
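
// The per-thread TLS region consists of the process's master TLS data, padded
// up to the required alignment, followed by the ThreadSpecificData header.
// These helpers compute that region's alignment and total size.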
size_t Thread::thread_specific_region_alignment() const
{
    return max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
}

size_t Thread::thread_specific_region_size() const
{
    return align_up_to(process().m_master_tls_size, thread_specific_region_alignment()) + sizeof(ThreadSpecificData);
}
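
// Allocates and maps this thread's TLS region: the master TLS image is copied
// into the lower part of the region, and the ThreadSpecificData header (with
// its self pointer set to point at itself) is placed directly above it.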
KResult Thread::make_thread_specific_region(Badge<Process>)
{
    // The process may not require a TLS region, or allocate TLS later with sys$allocate_tls (which is what dynamically loaded programs do)
    if (!process().m_master_tls_region)
        return KSuccess;

    auto range = process().space().allocate_range({}, thread_specific_region_size());
    if (!range.has_value())
        return ENOMEM;

    auto region_or_error = process().space().allocate_region(range.value(), "Thread-specific", PROT_READ | PROT_WRITE);
    if (region_or_error.is_error())
        return region_or_error.error();

    m_thread_specific_range = range.value();

    SmapDisabler disabler;
    auto* thread_specific_data = (ThreadSpecificData*)region_or_error.value()->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment())).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;

    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);

    return KSuccess;
}
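
// Looks up a thread by TID in the global TID map, returning a strong reference
// if the thread is still alive, or a null RefPtr otherwise.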
RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    RefPtr<Thread> found_thread;
    {
        ScopedSpinLock lock(g_tid_map_lock);
        if (auto it = g_tid_map->find(tid); it != g_tid_map->end()) {
            // We need to call try_ref() here as there is a window between
            // dropping the last reference and calling the Thread's destructor!
            // We shouldn't remove the threads from that list until it is truly
            // destructed as it may stick around past finalization in order to
            // be able to wait() on it!
            if (it->value->try_ref()) {
                found_thread = adopt_ref(*it->value);
            }
        }
    }
    return found_thread;
}

void Thread::reset_fpu_state()
{
    memcpy(m_fpu_state, &Processor::current().clean_fpu_state(), sizeof(FPUState));
}

bool Thread::should_be_stopped() const
{
    return process().is_stopped();
}

}
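
// Formats a thread as "process_name(pid:tid)", e.g. "sh(42:42)" for a
// single-threaded shell (example values only).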
void AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, const Kernel::Thread& value)
{
    return AK::Formatter<FormatString>::format(
        builder,
        "{}({}:{})", value.process().name(), value.pid().value(), value.tid().value());
}