/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
2021-01-01 05:45:16 +00:00
# include <AK/ScopeGuard.h>
2021-08-15 10:38:02 +00:00
# include <AK/Singleton.h>
2019-07-25 19:02:19 +00:00
# include <AK/StringBuilder.h>
2022-01-29 12:08:37 +00:00
# include <AK/TemporaryChange.h>
2020-11-15 18:58:19 +00:00
# include <AK/Time.h>
2021-10-14 21:53:48 +00:00
# include <Kernel/Arch/SmapDisabler.h>
2022-10-16 14:57:21 +00:00
# include <Kernel/Arch/TrapFrame.h>
2021-01-25 15:07:10 +00:00
# include <Kernel/Debug.h>
2021-06-06 23:15:07 +00:00
# include <Kernel/Devices/KCOVDevice.h>
2021-09-07 11:39:11 +00:00
# include <Kernel/FileSystem/OpenFileDescription.h>
2022-10-05 17:27:36 +00:00
# include <Kernel/InterruptDisabler.h>
2020-02-16 00:27:42 +00:00
# include <Kernel/KSyms.h>
2021-08-06 08:45:34 +00:00
# include <Kernel/Memory/MemoryManager.h>
# include <Kernel/Memory/PageDirectory.h>
2021-09-06 15:22:36 +00:00
# include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>
2021-03-09 21:35:13 +00:00
# include <Kernel/Panic.h>
2021-01-11 08:52:18 +00:00
# include <Kernel/PerformanceEventBuffer.h>
2019-06-07 09:43:58 +00:00
# include <Kernel/Process.h>
Kernel: Introduce the new ProcFS design
The new ProcFS design consists of two main parts:
1. The representative ProcFS class, which is derived from the FS class.
The ProcFS and its inodes are much more lean - merely 3 classes to
represent the common type of inodes - regular files, symbolic links and
directories. They're backed by a ProcFSExposedComponent object, which
is responsible for the functional operation behind the scenes.
2. The backend of the ProcFS - the ProcFSComponentsRegistrar class
and all derived classes from the ProcFSExposedComponent class. These
together form the entire backend and handle all the functions you can
expect from the ProcFS.
The ProcFSExposedComponent derived classes split to 3 types in the
manner of lifetime in the kernel:
1. Persistent objects - this category includes all basic objects, like
the root folder, /proc/bus folder, main blob files in the root folders,
etc. These objects are persistent and cannot die ever.
2. Semi-persistent objects - this category includes all PID folders,
and subdirectories to the PID folders. It also includes exposed objects
like the unveil JSON'ed blob. These objects are persistent as long as
the responsible process they represent is still alive.
3. Dynamic objects - this category includes files in the subdirectories
of a PID folder, like /proc/PID/fd/* or /proc/PID/stacks/*. Essentially,
these objects are always created dynamically and when no longer in need
after being used, they're deallocated.
Nevertheless, the new allocated backend objects and inodes try to use
the same InodeIndex if possible - this might change only when a thread
dies and a new thread is born with a new thread stack, or when a file
descriptor is closed and a new one within the same file descriptor
number is opened. This is needed to actually be able to do something
useful with these objects.
The new design assures that many ProcFS instances can be used at once,
with one backend for usage for all instances.
2021-06-12 01:23:58 +00:00
# include <Kernel/ProcessExposed.h>
2019-06-07 09:43:58 +00:00
# include <Kernel/Scheduler.h>
2021-06-22 15:40:16 +00:00
# include <Kernel/Sections.h>
2019-06-07 09:43:58 +00:00
# include <Kernel/Thread.h>
2020-03-28 08:47:16 +00:00
# include <Kernel/ThreadTracer.h>
2020-04-26 09:32:37 +00:00
# include <Kernel/TimerQueue.h>
2022-01-15 19:19:41 +00:00
# include <Kernel/kstdio.h>
2019-03-23 21:03:17 +00:00
# include <LibC/signal_numbers.h>
2020-02-16 00:27:42 +00:00
namespace Kernel {
2021-08-21 23:37:17 +00:00
static Singleton < SpinlockProtected < Thread : : GlobalList > > s_list ;
2021-01-28 05:58:24 +00:00
2021-08-21 23:37:17 +00:00
SpinlockProtected < Thread : : GlobalList > & Thread : : all_instances ( )
2021-01-28 05:58:24 +00:00
{
2021-08-15 10:38:02 +00:00
return * s_list ;
}
2022-08-19 18:53:40 +00:00
// Factory for Thread objects. All fallible resource acquisition (kernel
// stack, block timer, name string) happens up front via TRY so that a
// failure never leaves a half-constructed Thread behind.
ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_create(NonnullLockRefPtr<Process> process)
{
    auto kernel_stack_region = TRY(MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
    kernel_stack_region->set_stack(true);

    auto block_timer = TRY(try_make_lock_ref_counted<Timer>());

    auto name = TRY(KString::try_create(process->name()));

    return adopt_nonnull_lock_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), move(block_timer), move(name)));
}
2022-08-19 18:53:40 +00:00
// Thread constructor. Resources are handed in pre-allocated (see
// try_create); this only wires them up and initializes register state.
Thread::Thread(NonnullLockRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullLockRefPtr<Timer> block_timer, NonnullOwnPtr<KString> name)
    : m_process(move(process))
    , m_kernel_stack_region(move(kernel_stack_region))
    , m_name(move(name))
    , m_block_timer(move(block_timer))
{
    bool is_first_thread = m_process->add_thread(*this);
    if (is_first_thread) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }

    // FIXME: Handle KString allocation failure.
    m_kernel_stack_region->set_name(MUST(KString::formatted("Kernel stack (thread {})", m_tid.value())));

    Thread::all_instances().with([&](auto& list) {
        list.append(*this);
    });

    if constexpr (THREAD_DEBUG)
        dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());

    reset_fpu_state();

    // Only IF is set when a process boots.
    m_regs.set_flags(0x0202);

#if ARCH(I386)
    if (m_process->is_kernel_process()) {
        m_regs.cs = GDT_SELECTOR_CODE0;
        m_regs.ds = GDT_SELECTOR_DATA0;
        m_regs.es = GDT_SELECTOR_DATA0;
        m_regs.fs = 0;
        m_regs.ss = GDT_SELECTOR_DATA0;
        m_regs.gs = GDT_SELECTOR_PROC;
    } else {
        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
        m_regs.ds = GDT_SELECTOR_DATA3 | 3;
        m_regs.es = GDT_SELECTOR_DATA3 | 3;
        m_regs.fs = GDT_SELECTOR_DATA3 | 3;
        m_regs.ss = GDT_SELECTOR_DATA3 | 3;
        m_regs.gs = GDT_SELECTOR_TLS | 3;
    }
#elif ARCH(X86_64)
    if (m_process->is_kernel_process())
        m_regs.cs = GDT_SELECTOR_CODE0;
    else
        m_regs.cs = GDT_SELECTOR_CODE3 | 3;
#elif ARCH(AARCH64)
    TODO_AARCH64();
#else
#    error Unknown architecture
#endif

    m_regs.cr3 = m_process->address_space().with([](auto& space) { return space->page_directory().cr3(); });

    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    // Stack top is aligned down to an 8-byte boundary.
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & ~(FlatPtr)0x7u;

    if (m_process->is_kernel_process()) {
        m_regs.set_sp(m_kernel_stack_top);
        m_regs.set_sp0(m_kernel_stack_top);
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
#if ARCH(I386)
        m_regs.ss0 = GDT_SELECTOR_DATA0;
#endif
        m_regs.set_sp0(m_kernel_stack_top);
    }

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();
}
Thread::~Thread()
{
    // By the time the finalizer destroys us we must already have been
    // removed from the owning process's thread list...
    VERIFY(!m_process_thread_list_node.is_in_list());

    // We shouldn't be queued
    VERIFY(m_runnable_priority < 0);
}
2020-11-11 23:05:00 +00:00
2022-01-29 11:46:04 +00:00
// Core blocking primitive: parks the current thread on `blocker`,
// optionally arming a timeout timer, and yields until unblocked.
Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blocker)
{
    VERIFY(!Processor::current_in_irq());
    VERIFY(this == Thread::current());
    ScopedCritical critical;

    SpinlockLocker scheduler_lock(g_scheduler_lock);
    SpinlockLocker block_lock(m_block_lock);

    // We need to hold m_block_lock so that nobody can unblock a blocker as soon
    // as it is constructed and registered elsewhere
    ScopeGuard finalize_guard([&] {
        blocker.finalize();
    });

    if (!blocker.setup_blocker()) {
        blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::UnblockConditionAlreadyMet);
        return BlockResult::NotBlocked;
    }

    // Relaxed semantics are fine for timeout_unblocked because we
    // synchronize on the spin locks already.
    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
    bool timer_was_added = false;

    switch (state()) {
    case Thread::State::Stopped:
        // It's possible that we were requested to be stopped!
        break;
    case Thread::State::Running:
        VERIFY(m_blocker == nullptr);
        break;
    default:
        VERIFY_NOT_REACHED();
    }

    m_blocker = &blocker;

    if (auto& block_timeout = blocker.override_timeout(timeout); !block_timeout.is_infinite()) {
        // Process::kill_all_threads may be called at any time, which will mark all
        // threads to die. In that case
        timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
            VERIFY(!Processor::current_in_irq());
            VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
            VERIFY(!m_block_lock.is_locked_by_current_processor());
            // NOTE: this may execute on the same or any other processor!
            SpinlockLocker scheduler_lock(g_scheduler_lock);
            SpinlockLocker block_lock(m_block_lock);
            // Only the first of {timer, unblocker} to flip timeout_unblocked wins.
            if (m_blocker && !timeout_unblocked.exchange(true))
                unblock();
        });
        if (!timer_was_added) {
            // Timeout is already in the past
            blocker.will_unblock_immediately_without_blocking(Blocker::UnblockImmediatelyReason::TimeoutInThePast);
            m_blocker = nullptr;
            return BlockResult::InterruptedByTimeout;
        }
    }

    blocker.begin_blocking({});

    set_state(Thread::State::Blocked);

    block_lock.unlock();
    scheduler_lock.unlock();

    dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
    bool did_timeout = false;
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    for (;;) {
        // Yield to the scheduler, and wait for us to resume unblocked.
        VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
        VERIFY(Processor::in_critical());
        yield_without_releasing_big_lock();
        VERIFY(Processor::in_critical());

        SpinlockLocker block_lock2(m_block_lock);
        if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
            // Spurious wakeup of an uninterruptible blocker: go back to sleep.
            block_lock2.unlock();
            dbgln("Thread should not be unblocking, current state: {}", state_string());
            set_state(Thread::State::Blocked);
            continue;
        }
        // Prevent the timeout from unblocking this thread if it happens to
        // be in the process of firing already
        did_timeout |= timeout_unblocked.exchange(true);
        if (m_blocker) {
            // Remove ourselves...
            VERIFY(m_blocker == &blocker);
            m_blocker = nullptr;
        }
        dbgln_if(THREAD_DEBUG, "<-- Thread {} unblocked from {} ({})", *this, &blocker, blocker.state_string());
        break;
    }

    // Notify the blocker that we are no longer blocking. It may need
    // to clean up now while we're still holding m_lock
    auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

    if (timer_was_added && !did_timeout) {
        // Cancel the timer while not holding any locks. This allows
        // the timer function to complete before we remove it
        // (e.g. if it's on another processor)
        TimerQueue::the().cancel_timer(*m_block_timer);
    }
    if (previous_locked != LockMode::Unlocked) {
        // NOTE: This may trigger another call to Thread::block().
        relock_process(previous_locked, lock_count_to_restore);
    }
    return result;
}
2021-09-05 17:02:03 +00:00
void Thread : : block ( Kernel : : Mutex & lock , SpinlockLocker < Spinlock > & lock_lock , u32 lock_count )
2021-07-10 16:23:16 +00:00
{
2021-08-22 10:21:31 +00:00
VERIFY ( ! Processor : : current_in_irq ( ) ) ;
2021-07-10 16:23:16 +00:00
VERIFY ( this = = Thread : : current ( ) ) ;
ScopedCritical critical ;
2021-08-21 23:49:22 +00:00
SpinlockLocker scheduler_lock ( g_scheduler_lock ) ;
2022-01-30 13:46:07 +00:00
SpinlockLocker block_lock ( m_block_lock ) ;
2021-07-10 16:23:16 +00:00
switch ( state ( ) ) {
2022-01-30 10:38:50 +00:00
case Thread : : State : : Stopped :
2021-07-10 16:23:16 +00:00
// It's possible that we were requested to be stopped!
break ;
2022-01-30 10:38:50 +00:00
case Thread : : State : : Running :
2021-07-10 16:23:16 +00:00
VERIFY ( m_blocker = = nullptr ) ;
break ;
default :
2022-01-26 11:49:12 +00:00
dbgln ( " Error: Attempting to block with invalid thread state - {} " , state_string ( ) ) ;
2021-07-10 16:23:16 +00:00
VERIFY_NOT_REACHED ( ) ;
}
2021-07-16 21:48:22 +00:00
// If we're blocking on the big-lock we may actually be in the process
2022-01-30 10:43:03 +00:00
// of unblocking from another lock. If that's the case m_blocking_mutex
2021-07-16 21:48:22 +00:00
// is already set
auto & big_lock = process ( ) . big_lock ( ) ;
2022-01-30 10:43:03 +00:00
VERIFY ( ( & lock = = & big_lock & & m_blocking_mutex ! = & big_lock ) | | ! m_blocking_mutex ) ;
2021-07-16 21:48:22 +00:00
2022-01-30 10:43:03 +00:00
auto * previous_blocking_mutex = m_blocking_mutex ;
m_blocking_mutex = & lock ;
2021-07-10 16:23:16 +00:00
m_lock_requested_count = lock_count ;
2022-01-30 10:38:50 +00:00
set_state ( Thread : : State : : Blocked ) ;
2021-07-10 16:23:16 +00:00
block_lock . unlock ( ) ;
2022-08-17 18:14:49 +00:00
scheduler_lock . unlock ( ) ;
2021-07-10 16:23:16 +00:00
lock_lock . unlock ( ) ;
2021-07-17 19:09:51 +00:00
dbgln_if ( THREAD_DEBUG , " Thread {} blocking on Mutex {} " , * this , & lock ) ;
2021-07-10 16:23:16 +00:00
for ( ; ; ) {
// Yield to the scheduler, and wait for us to resume unblocked.
2021-08-29 18:10:24 +00:00
VERIFY ( ! g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
2021-08-09 23:16:08 +00:00
VERIFY ( Processor : : in_critical ( ) ) ;
2022-01-28 23:47:18 +00:00
if ( & lock ! = & big_lock & & big_lock . is_exclusively_locked_by_current_thread ( ) ) {
2021-07-16 01:38:07 +00:00
// We're locking another lock and already hold the big lock...
// We need to release the big lock
2021-07-16 01:45:22 +00:00
yield_and_release_relock_big_lock ( ) ;
2021-07-16 01:38:07 +00:00
} else {
2021-08-10 19:20:45 +00:00
// By the time we've reached this another thread might have
// marked us as holding the big lock, so this call must not
// verify that we're not holding it.
yield_without_releasing_big_lock ( VerifyLockNotHeld : : No ) ;
2021-07-16 01:38:07 +00:00
}
2021-08-09 23:16:08 +00:00
VERIFY ( Processor : : in_critical ( ) ) ;
2021-07-10 16:23:16 +00:00
2021-08-21 23:49:22 +00:00
SpinlockLocker block_lock2 ( m_block_lock ) ;
2022-01-30 10:43:03 +00:00
VERIFY ( ! m_blocking_mutex ) ;
m_blocking_mutex = previous_blocking_mutex ;
2021-07-10 16:23:16 +00:00
break ;
}
lock_lock . lock ( ) ;
}
2022-01-30 10:43:03 +00:00
// Wakes this thread from a Mutex block. Returns the lock recursion count
// the waker must restore on the thread's behalf.
u32 Thread::unblock_from_mutex(Kernel::Mutex& mutex)
{
    SpinlockLocker scheduler_lock(g_scheduler_lock);
    SpinlockLocker block_lock(m_block_lock);

    VERIFY(!Processor::current_in_irq());
    VERIFY(m_blocking_mutex == &mutex);

    dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &mutex);

    auto requested_count = m_lock_requested_count;

    m_blocking_mutex = nullptr;
    if (Thread::current() == this) {
        // Unblocking ourselves: just resume running.
        set_state(Thread::State::Running);
        return requested_count;
    }
    VERIFY(m_state != Thread::State::Runnable && m_state != Thread::State::Running);
    set_state(Thread::State::Runnable);
    return requested_count;
}
2020-11-29 23:05:27 +00:00
void Thread : : unblock_from_blocker ( Blocker & blocker )
{
2020-12-09 04:18:45 +00:00
auto do_unblock = [ & ] ( ) {
2021-08-21 23:49:22 +00:00
SpinlockLocker scheduler_lock ( g_scheduler_lock ) ;
SpinlockLocker block_lock ( m_block_lock ) ;
2020-12-09 04:18:45 +00:00
if ( m_blocker ! = & blocker )
return ;
if ( ! should_be_stopped ( ) & & ! is_stopped ( ) )
unblock ( ) ;
} ;
2021-11-06 21:06:08 +00:00
if ( Processor : : current_in_irq ( ) ! = 0 ) {
2022-02-13 19:21:14 +00:00
Processor : : deferred_call_queue ( [ do_unblock = move ( do_unblock ) , self = try_make_weak_ptr ( ) . release_value_but_fixme_should_propagate_errors ( ) ] ( ) {
2020-12-09 04:18:45 +00:00
if ( auto this_thread = self . strong_ref ( ) )
do_unblock ( ) ;
} ) ;
} else {
do_unblock ( ) ;
}
2019-03-23 21:03:17 +00:00
}
2020-11-29 23:05:27 +00:00
void Thread : : unblock ( u8 signal )
2019-03-23 21:03:17 +00:00
{
2021-08-22 10:21:31 +00:00
VERIFY ( ! Processor : : current_in_irq ( ) ) ;
2021-08-29 18:10:24 +00:00
VERIFY ( g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
VERIFY ( m_block_lock . is_locked_by_current_processor ( ) ) ;
2022-01-30 10:38:50 +00:00
if ( m_state ! = Thread : : State : : Blocked )
2020-11-29 23:05:27 +00:00
return ;
2022-01-30 10:43:03 +00:00
if ( m_blocking_mutex )
2021-07-10 16:23:16 +00:00
return ;
2021-02-23 19:42:32 +00:00
VERIFY ( m_blocker ) ;
2020-12-09 04:18:45 +00:00
if ( signal ! = 0 ) {
2021-01-20 23:06:19 +00:00
if ( is_handling_page_fault ( ) ) {
// Don't let signals unblock threads that are blocked inside a page fault handler.
// This prevents threads from EINTR'ing the inode read in an inode page fault.
// FIXME: There's probably a better way to solve this.
return ;
}
2020-12-09 04:18:45 +00:00
if ( ! m_blocker - > can_be_interrupted ( ) & & ! m_should_die )
return ;
2020-11-29 23:05:27 +00:00
m_blocker - > set_interrupted_by_signal ( signal ) ;
2020-12-09 04:18:45 +00:00
}
2020-04-06 12:38:33 +00:00
m_blocker = nullptr ;
2020-06-28 21:34:31 +00:00
if ( Thread : : current ( ) = = this ) {
2022-01-30 10:38:50 +00:00
set_state ( Thread : : State : : Running ) ;
2019-03-23 21:03:17 +00:00
return ;
}
2022-01-30 10:38:50 +00:00
VERIFY ( m_state ! = Thread : : State : : Runnable & & m_state ! = Thread : : State : : Running ) ;
set_state ( Thread : : State : : Runnable ) ;
2019-03-23 21:03:17 +00:00
}
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 15:46:01 +00:00
void Thread : : set_should_die ( )
{
2019-12-22 10:35:02 +00:00
if ( m_should_die ) {
2021-01-12 21:30:52 +00:00
dbgln ( " {} Should already die " , * this ) ;
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 15:46:01 +00:00
return ;
2019-12-22 10:35:02 +00:00
}
2020-07-05 20:32:07 +00:00
ScopedCritical critical ;
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 15:46:01 +00:00
// Remember that we should die instead of returning to
// the userspace.
2021-08-21 23:49:22 +00:00
SpinlockLocker lock ( g_scheduler_lock ) ;
2020-12-08 04:29:41 +00:00
m_should_die = true ;
// NOTE: Even the current thread can technically be in "Stopped"
// state! This is the case when another thread sent a SIGSTOP to
// it while it was running and it calls e.g. exit() before
// the scheduler gets involved again.
if ( is_stopped ( ) ) {
// If we were stopped, we need to briefly resume so that
// the kernel stacks can clean up. We won't ever return back
// to user mode, though
2021-02-23 19:42:32 +00:00
VERIFY ( ! process ( ) . is_stopped ( ) ) ;
2020-12-08 04:29:41 +00:00
resume_from_stopped ( ) ;
2020-08-14 16:24:31 +00:00
}
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 15:46:01 +00:00
if ( is_blocked ( ) ) {
2021-08-21 23:49:22 +00:00
SpinlockLocker block_lock ( m_block_lock ) ;
2020-12-08 04:29:41 +00:00
if ( m_blocker ) {
// We're blocked in the kernel.
m_blocker - > set_interrupted_by_death ( ) ;
unblock ( ) ;
}
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 15:46:01 +00:00
}
}
void Thread : : die_if_needed ( )
{
2021-02-23 19:42:32 +00:00
VERIFY ( Thread : : current ( ) = = this ) ;
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 15:46:01 +00:00
if ( ! m_should_die )
return ;
2020-12-14 23:36:22 +00:00
u32 unlock_count ;
2020-12-20 23:09:48 +00:00
[[maybe_unused]] auto rc = unlock_process_if_locked ( unlock_count ) ;
2019-12-22 11:34:38 +00:00
2021-06-06 09:40:11 +00:00
dbgln_if ( THREAD_DEBUG , " Thread {} is dying " , * this ) ;
{
2021-08-21 23:49:22 +00:00
SpinlockLocker lock ( g_scheduler_lock ) ;
2021-06-06 09:40:11 +00:00
// It's possible that we don't reach the code after this block if the
// scheduler is invoked and FinalizerTask cleans up this thread, however
// that doesn't matter because we're trying to invoke the scheduler anyway
2022-01-30 10:38:50 +00:00
set_state ( Thread : : State : : Dying ) ;
2021-06-06 09:40:11 +00:00
}
2020-07-05 20:32:07 +00:00
ScopedCritical critical ;
2020-07-04 23:37:36 +00:00
2020-07-05 20:32:07 +00:00
// Flag a context switch. Because we're in a critical section,
2021-02-10 20:18:03 +00:00
// Scheduler::yield will actually only mark a pending context switch
2020-07-05 20:32:07 +00:00
// Simply leaving the critical section would not necessarily trigger
// a switch.
2020-06-27 19:42:28 +00:00
Scheduler : : yield ( ) ;
2020-07-04 23:37:36 +00:00
2020-07-05 20:32:07 +00:00
// Now leave the critical section so that we can also trigger the
// actual context switch
2021-08-09 23:56:21 +00:00
Processor : : clear_critical ( ) ;
2021-08-22 10:21:31 +00:00
dbgln ( " die_if_needed returned from clear_critical!!! in irq: {} " , Processor : : current_in_irq ( ) ) ;
2020-07-05 20:32:07 +00:00
// We should never get here, but the scoped scheduler lock
// will be released by Scheduler::context_switch again
2021-02-23 19:42:32 +00:00
VERIFY_NOT_REACHED ( ) ;
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 15:46:01 +00:00
}
2020-11-17 03:51:34 +00:00
void Thread : : exit ( void * exit_value )
{
2021-02-23 19:42:32 +00:00
VERIFY ( Thread : : current ( ) = = this ) ;
2021-08-22 23:22:38 +00:00
m_join_blocker_set . thread_did_exit ( exit_value ) ;
2020-11-17 03:51:34 +00:00
set_should_die ( ) ;
2020-12-14 23:36:22 +00:00
u32 unlock_count ;
2020-12-20 23:09:48 +00:00
[[maybe_unused]] auto rc = unlock_process_if_locked ( unlock_count ) ;
2021-05-28 09:18:58 +00:00
if ( m_thread_specific_range . has_value ( ) ) {
2022-08-23 15:58:05 +00:00
process ( ) . address_space ( ) . with ( [ & ] ( auto & space ) {
auto * region = space - > find_region_from_range ( m_thread_specific_range . value ( ) ) ;
space - > deallocate_region ( * region ) ;
} ) ;
2021-05-28 09:18:58 +00:00
}
2021-06-06 23:15:07 +00:00
# ifdef ENABLE_KERNEL_COVERAGE_COLLECTION
KCOVDevice : : free_thread ( ) ;
# endif
2020-11-17 03:51:34 +00:00
die_if_needed ( ) ;
}
2021-08-10 19:20:45 +00:00
void Thread : : yield_without_releasing_big_lock ( VerifyLockNotHeld verify_lock_not_held )
2020-12-08 04:29:41 +00:00
{
2021-08-29 18:10:24 +00:00
VERIFY ( ! g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
2022-01-28 23:47:18 +00:00
VERIFY ( verify_lock_not_held = = VerifyLockNotHeld : : No | | ! process ( ) . big_lock ( ) . is_exclusively_locked_by_current_thread ( ) ) ;
2021-07-10 16:23:16 +00:00
// Disable interrupts here. This ensures we don't accidentally switch contexts twice
InterruptDisabler disable ;
Scheduler : : yield ( ) ; // flag a switch
2021-08-09 23:56:21 +00:00
u32 prev_critical = Processor : : clear_critical ( ) ;
2020-12-08 04:29:41 +00:00
// NOTE: We may be on a different CPU now!
2021-08-09 23:56:21 +00:00
Processor : : restore_critical ( prev_critical ) ;
2020-12-08 04:29:41 +00:00
}
2021-07-16 01:45:22 +00:00
void Thread::yield_and_release_relock_big_lock()
{
    // Yield the CPU, releasing the process big lock across the context
    // switch and re-acquiring it afterwards in relock_process().
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    // Disable interrupts here. This ensures we don't accidentally switch contexts twice
    InterruptDisabler disable;
    Scheduler::yield(); // flag a switch
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    relock_process(previous_locked, lock_count_to_restore);
}
2019-12-01 10:57:20 +00:00
2020-12-14 23:36:22 +00:00
LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
{
    // Release the process "big lock" if this thread currently holds it
    // exclusively. The previous recursion count is written to
    // lock_count_to_restore so relock_process() can re-acquire it later.
    auto& big_lock = process().big_lock();
    return big_lock.force_unlock_exclusive_if_locked(lock_count_to_restore);
}
2020-12-14 23:36:22 +00:00
void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
{
    // Counterpart of unlock_process_if_locked(): let the pending yield run,
    // then restore the big lock to its previous recursion count.
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::yield above.
    // We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_critical = Processor::clear_critical();

    // CONTEXT SWITCH HAPPENS HERE!

    // NOTE: We may be on a different CPU now!
    Processor::restore_critical(prev_critical);

    if (previous_locked != LockMode::Unlocked) {
        // We've unblocked, relock the process if needed and carry on.
        process().big_lock().restore_exclusive_lock(lock_count_to_restore);
    }
}
2019-03-23 21:03:17 +00:00
2021-10-31 21:54:39 +00:00
// NOLINTNEXTLINE(readability-make-member-function-const) False positive; We call block<SleepBlocker> which is not const
2022-04-01 17:58:27 +00:00
auto Thread : : sleep ( clockid_t clock_id , Time const & duration , Time * remaining_time ) - > BlockResult
2019-03-23 21:03:17 +00:00
{
2022-01-30 10:38:50 +00:00
VERIFY ( state ( ) = = Thread : : State : : Running ) ;
2021-01-10 23:29:28 +00:00
return Thread : : current ( ) - > block < Thread : : SleepBlocker > ( { } , Thread : : BlockTimeout ( false , & duration , nullptr , clock_id ) , remaining_time ) ;
2020-11-15 18:58:19 +00:00
}
2021-10-31 21:54:39 +00:00
// NOLINTNEXTLINE(readability-make-member-function-const) False positive; We call block<SleepBlocker> which is not const
2022-04-01 17:58:27 +00:00
auto Thread : : sleep_until ( clockid_t clock_id , Time const & deadline ) - > BlockResult
2020-11-15 18:58:19 +00:00
{
2022-01-30 10:38:50 +00:00
VERIFY ( state ( ) = = Thread : : State : : Running ) ;
2021-01-10 23:29:28 +00:00
return Thread : : current ( ) - > block < Thread : : SleepBlocker > ( { } , Thread : : BlockTimeout ( true , & deadline , nullptr , clock_id ) ) ;
2019-03-23 21:03:17 +00:00
}
2021-08-05 18:48:14 +00:00
StringView Thread::state_string() const
{
    // Human-readable name of the thread's current scheduler state,
    // used for debug output and introspection.
    switch (state()) {
    case Thread::State::Invalid:
        return "Invalid"sv;
    case Thread::State::Runnable:
        return "Runnable"sv;
    case Thread::State::Running:
        return "Running"sv;
    case Thread::State::Dying:
        return "Dying"sv;
    case Thread::State::Dead:
        return "Dead"sv;
    case Thread::State::Stopped:
        return "Stopped"sv;
    case Thread::State::Blocked: {
        // Take the block lock so m_blocker/m_blocking_mutex can't change under us.
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocking_mutex)
            return "Mutex"sv;
        if (m_blocker)
            return m_blocker->state_string();
        // A Blocked thread must have either a blocking mutex or a Blocker.
        VERIFY_NOT_REACHED();
    }
    }
    PANIC("Thread::state_string(): Invalid state: {}", (int)state());
}
void Thread::finalize()
{
    // Finalization always runs on the dedicated finalizer thread;
    // a thread can never finalize itself.
    VERIFY(Thread::current() == g_finalizer);
    VERIFY(Thread::current() != this);

#if LOCK_DEBUG
    VERIFY(!m_lock.is_locked_by_current_processor());
    // A dying thread must not still hold any mutexes; report leaks loudly.
    if (lock_count() > 0) {
        dbgln("Thread {} leaking {} Locks!", *this, lock_count());
        SpinlockLocker list_lock(m_holding_locks_lock);
        for (auto& info : m_holding_locks_list) {
            auto const& location = info.lock_location;
            dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count);
        }
        VERIFY_NOT_REACHED();
    }
#endif

    {
        SpinlockLocker lock(g_scheduler_lock);
        dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this);
        set_state(Thread::State::Dead);
        // Wake up anyone blocked in join() on this thread.
        m_join_blocker_set.thread_finalizing();
    }

    // Requested via set_dump_backtrace_on_finalization(), e.g. when dumping core.
    if (m_dump_backtrace_on_finalization) {
        auto trace_or_error = backtrace();
        if (!trace_or_error.is_error()) {
            auto trace = trace_or_error.release_value();
            dbgln("Backtrace:");
            kernelputstr(trace->characters(), trace->length());
        }
    }

    drop_thread_count();
}
2020-09-27 14:53:35 +00:00
2022-01-26 16:34:04 +00:00
void Thread : : drop_thread_count ( )
2021-01-01 05:45:16 +00:00
{
2021-01-23 06:24:33 +00:00
bool is_last = process ( ) . remove_thread ( * this ) ;
2022-01-26 16:34:04 +00:00
if ( is_last )
2020-12-09 04:18:45 +00:00
process ( ) . finalize ( ) ;
2019-03-23 21:03:17 +00:00
}
void Thread::finalize_dying_threads()
{
    // Called on the finalizer thread to reap all threads that have entered
    // the Dying state and are ready to be finalized.
    VERIFY(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        SpinlockLocker lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (!thread.is_finalizable())
                return;
            auto result = dying_threads.try_append(&thread);
            // We ignore allocation failures above the first 32 guaranteed thread slots, and
            // just flag our future-selves to finalize these threads at a later point
            if (result.is_error())
                g_finalizer_has_work.store(true, AK::MemoryOrder::memory_order_release);
        });
    }
    for (auto* thread : dying_threads) {
        // Keep the process alive while we finalize its (possibly last) thread.
        LockRefPtr<Process> process = thread->process();
        dbgln_if(PROCESS_DEBUG, "Before finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        thread->finalize();
        dbgln_if(PROCESS_DEBUG, "After finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        // This thread will never execute again, drop the running reference
        // NOTE: This may not necessarily drop the last reference if anything
        // else is still holding onto this thread!
        thread->unref();
    }
}
2021-07-15 03:46:32 +00:00
void Thread : : update_time_scheduled ( u64 current_scheduler_time , bool is_kernel , bool no_longer_running )
{
if ( m_last_time_scheduled . has_value ( ) ) {
u64 delta ;
if ( current_scheduler_time > = m_last_time_scheduled . value ( ) )
delta = current_scheduler_time - m_last_time_scheduled . value ( ) ;
else
delta = m_last_time_scheduled . value ( ) - current_scheduler_time ; // the unlikely event that the clock wrapped
if ( delta ! = 0 ) {
// Add it to the global total *before* updating the thread's value!
Scheduler : : add_time_scheduled ( delta , is_kernel ) ;
auto & total_time = is_kernel ? m_total_time_scheduled_kernel : m_total_time_scheduled_user ;
2022-08-19 11:54:14 +00:00
total_time . fetch_add ( delta , AK : : memory_order_relaxed ) ;
2021-07-15 03:46:32 +00:00
}
}
if ( no_longer_running )
m_last_time_scheduled = { } ;
else
m_last_time_scheduled = current_scheduler_time ;
}
2021-01-25 23:37:36 +00:00
bool Thread : : tick ( )
2019-03-23 21:03:17 +00:00
{
2021-01-25 23:37:36 +00:00
if ( previous_mode ( ) = = PreviousMode : : KernelMode ) {
2020-08-02 02:04:56 +00:00
+ + m_process - > m_ticks_in_kernel ;
2020-12-04 05:12:50 +00:00
+ + m_ticks_in_kernel ;
} else {
+ + m_process - > m_ticks_in_user ;
+ + m_ticks_in_user ;
}
2021-11-06 21:06:08 +00:00
- - m_ticks_left ;
return m_ticks_left ! = 0 ;
2019-03-23 21:03:17 +00:00
}
2020-12-08 04:29:41 +00:00
void Thread::check_dispatch_pending_signal()
{
    // Dispatch one pending signal (if any are deliverable in the current
    // state), and yield if the dispatch stopped this thread.
    auto result = DispatchSignalResult::Continue;
    {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        if (pending_signals_for_state() != 0) {
            result = dispatch_one_pending_signal();
        }
    }

    if (result == DispatchSignalResult::Yield) {
        yield_without_releasing_big_lock();
    }
}
2020-09-09 02:37:15 +00:00
u32 Thread::pending_signals() const
{
    // Take the scheduler lock so the pending set cannot change under us.
    SpinlockLocker locker(g_scheduler_lock);
    auto pending = pending_signals_for_state();
    return pending;
}
u32 Thread::pending_signals_for_state() const
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    // Only these signals may be delivered to a stopped thread.
    constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
    // Never deliver signals while this thread is handling a page fault.
    if (is_handling_page_fault())
        return 0;
    if (m_state == State::Stopped)
        return m_pending_signals & stopped_signal_mask;
    return m_pending_signals;
}
2020-02-01 09:27:25 +00:00
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    // Mark `signal` as pending on this thread and, if possible, arrange for
    // prompt delivery by resuming or unblocking the thread.
    VERIFY(signal < NSIG);
    VERIFY(process().is_user_process());
    SpinlockLocker scheduler_lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbgln_if(SIGNAL_DEBUG, "Signal {} was ignored by {}", signal, process());
        return;
    }

    if constexpr (SIGNAL_DEBUG) {
        if (sender)
            dbgln("Signal: {} sent {} to {}", *sender, signal, process());
        else
            dbgln("Signal: Kernel send {} to {}", signal, process());
    }

    // Record the signal as pending and remember who sent it.
    m_pending_signals |= 1 << (signal - 1);
    // A null sender means the kernel sent it; attribute it to ourselves then.
    m_signal_senders[signal] = sender ? sender->pid() : pid();
    // Refresh the cached "any unmasked pending signals" flag.
    m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
    m_signal_blocker_set.unblock_all_blockers_whose_conditions_are_met();

    if (!has_unmasked_pending_signals())
        return;

    if (m_state == Thread::State::Stopped) {
        // A stopped thread is only resumed for signals it may receive while
        // stopped (see pending_signals_for_state()).
        if (pending_signals_for_state() != 0) {
            dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal);
            resume_from_stopped();
        }
    } else {
        // Interrupt any blocker so the signal can be delivered promptly.
        SpinlockLocker block_lock(m_block_lock);
        dbgln_if(SIGNAL_DEBUG, "Signal: Unblocking {} to deliver signal {}", *this, signal);
        unblock(signal);
    }
}
2020-09-09 02:37:15 +00:00
u32 Thread::update_signal_mask(u32 signal_mask)
{
    // Replace the whole signal mask, returning the previous one
    // (sigprocmask(SIG_SETMASK) semantics).
    SpinlockLocker locker(g_scheduler_lock);
    auto old_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    // Recompute whether any pending signal has become deliverable.
    m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
    return old_mask;
}
u32 Thread::signal_mask() const
{
    // Read the mask under the scheduler lock for a consistent snapshot.
    SpinlockLocker locker(g_scheduler_lock);
    auto mask = m_signal_mask;
    return mask;
}
u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    // Add the given signals to the mask (block) or remove them (unblock),
    // returning the previous mask (sigprocmask(SIG_BLOCK/SIG_UNBLOCK) semantics).
    SpinlockLocker locker(g_scheduler_lock);
    auto old_mask = m_signal_mask;
    if (block)
        m_signal_mask |= signal_set;
    else
        m_signal_mask &= ~signal_set;
    // Recompute whether any pending signal has become deliverable.
    m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
    return old_mask;
}
2021-12-11 15:40:50 +00:00
void Thread::reset_signals_for_exec()
{
    // Reset per-thread signal state to what execve(2) requires.
    SpinlockLocker lock(g_scheduler_lock);
    // The signal mask is preserved across execve(2).
    // The pending signal set is preserved across an execve(2).
    m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
    // Clear all per-sigaction signal masks.
    m_signal_action_masks.fill({});
    // A successful call to execve(2) removes any existing alternate signal stack
    m_alternative_signal_stack = 0;
    m_alternative_signal_stack_size = 0;
}
2019-10-07 09:22:50 +00:00
// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    // May only be called by the thread itself, from its own context.
    VERIFY(Thread::current() == this);
    DispatchSignalResult result;
    {
        SpinlockLocker lock(g_scheduler_lock);
        result = dispatch_signal(signal);
    }
    if (result == DispatchSignalResult::Terminate) {
        Thread::current()->die_if_needed();
        VERIFY_NOT_REACHED(); // dispatch_signal will request termination of the thread, so the above call should never return
    }
    if (result == DispatchSignalResult::Yield)
        yield_and_release_relock_big_lock();
}
2020-11-29 23:05:27 +00:00
DispatchSignalResult Thread::dispatch_one_pending_signal()
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    u32 const signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (signal_candidates == 0)
        return DispatchSignalResult::Continue;

    // Deliver the lowest-numbered deliverable signal first.
    u8 signal = 1;
    while (signal < NSIG && (signal_candidates & (1 << (signal - 1))) == 0)
        ++signal;
    return dispatch_signal(signal);
}
2020-11-29 23:05:27 +00:00
DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{
    VERIFY(signal != 0);
    SpinlockLocker scheduler_lock(g_scheduler_lock);
    // Only dispatch if this specific signal is both pending and unmasked.
    u32 const signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    bool const is_deliverable = (signal_candidates & (1 << (signal - 1))) != 0;
    if (!is_deliverable)
        return DispatchSignalResult::Continue;
    return dispatch_signal(signal);
}
2019-06-07 15:13:23 +00:00
// What the kernel does with a signal that has no user-installed handler.
enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};
Kernel: Mark compilation-unit-only functions as static
This enables a nice warning in case a function becomes dead code. Also, in case
of signal_trampoline_dummy, marking it external (non-static) prevents it from
being 'optimized away', which would lead to surprising and weird linker errors.
I found these places by using -Wmissing-declarations.
The Kernel still shows these issues, which I think are false-positives,
but don't want to touch:
- Kernel/Arch/i386/CPU.cpp:1081:17: void Kernel::enter_thread_context(Kernel::Thread*, Kernel::Thread*)
- Kernel/Arch/i386/CPU.cpp:1170:17: void Kernel::context_first_init(Kernel::Thread*, Kernel::Thread*, Kernel::TrapFrame*)
- Kernel/Arch/i386/CPU.cpp:1304:16: u32 Kernel::do_init_context(Kernel::Thread*, u32)
- Kernel/Arch/i386/CPU.cpp:1347:17: void Kernel::pre_init_finished()
- Kernel/Arch/i386/CPU.cpp:1360:17: void Kernel::post_init_finished()
No idea, not gonna touch it.
- Kernel/init.cpp:104:30: void Kernel::init()
- Kernel/init.cpp:167:30: void Kernel::init_ap(u32, Kernel::Processor*)
- Kernel/init.cpp:184:17: void Kernel::init_finished(u32)
Called by boot.S.
- Kernel/init.cpp:383:16: int Kernel::__cxa_atexit(void (*)(void*), void*, void*)
- Kernel/StdLib.cpp:285:19: void __cxa_pure_virtual()
- Kernel/StdLib.cpp:300:19: void __stack_chk_fail()
- Kernel/StdLib.cpp:305:19: void __stack_chk_fail_local()
Not sure how to tell the compiler that the compiler is already using them.
Also, maybe __cxa_atexit should go into StdLib.cpp?
- Kernel/Modules/TestModule.cpp:31:17: void module_init()
- Kernel/Modules/TestModule.cpp:40:17: void module_fini()
Could maybe go into a new header. This would also provide type-checking for new modules.
2020-08-10 19:12:13 +00:00
// Map a signal number to its default disposition (the action taken when the
// handler is SIG_DFL), mirroring the classic POSIX defaults.
static DefaultSignalAction default_signal_action(u8 signal)
{
    VERIFY(signal && signal < NSIG);
    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGCANCEL:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    default:
        VERIFY_NOT_REACHED();
    }
}
2019-07-08 16:59:48 +00:00
bool Thread : : should_ignore_signal ( u8 signal ) const
{
2022-07-21 21:08:07 +00:00
VERIFY ( signal < NSIG ) ;
2022-02-24 18:55:49 +00:00
auto const & action = m_process - > m_signal_action_data [ signal ] ;
2019-07-08 16:59:48 +00:00
if ( action . handler_or_sigaction . is_null ( ) )
return default_signal_action ( signal ) = = DefaultSignalAction : : Ignore ;
2021-10-31 22:52:43 +00:00
return ( ( sighandler_t ) action . handler_or_sigaction . get ( ) = = SIG_IGN ) ;
2019-07-08 16:59:48 +00:00
}
2019-10-07 09:22:50 +00:00
bool Thread : : has_signal_handler ( u8 signal ) const
{
2022-07-21 21:08:07 +00:00
VERIFY ( signal < NSIG ) ;
2022-02-24 18:55:49 +00:00
auto const & action = m_process - > m_signal_action_data [ signal ] ;
2019-10-07 09:22:50 +00:00
return ! action . handler_or_sigaction . is_null ( ) ;
}
2021-11-29 23:07:59 +00:00
bool Thread : : is_signal_masked ( u8 signal ) const
{
2022-07-21 21:08:07 +00:00
VERIFY ( signal < NSIG ) ;
2021-11-29 23:07:59 +00:00
return ( 1 < < ( signal - 1 ) ) & m_signal_mask ;
}
2021-10-28 20:33:41 +00:00
bool Thread : : has_alternative_signal_stack ( ) const
{
return m_alternative_signal_stack_size ! = 0 ;
}
bool Thread : : is_in_alternative_signal_stack ( ) const
{
auto sp = get_register_dump_from_stack ( ) . userspace_sp ( ) ;
return sp > = m_alternative_signal_stack & & sp < m_alternative_signal_stack + m_alternative_signal_stack_size ;
}
2021-11-29 23:21:03 +00:00
static ErrorOr<void> push_value_on_user_stack(FlatPtr& stack, FlatPtr data)
{
    // Grow the stack downwards by one machine word and copy the value into
    // it. copy_to_user() is used because the stack lives in userspace memory.
    stack -= sizeof(FlatPtr);
    auto* slot = (FlatPtr*)stack;
    return copy_to_user(slot, &data);
}
2022-02-25 21:28:06 +00:00
template<typename T>
static ErrorOr<void> copy_value_on_user_stack(FlatPtr& stack, T const& data)
{
    // Reserve room on the userspace stack and copy the object into it.
    stack -= sizeof(data);
    using PlainT = RemoveCVReference<T>;
    return copy_to_user((PlainT*)stack, &data);
}
2020-08-14 16:24:31 +00:00
void Thread::resume_from_stopped()
{
    // Take a stopped thread back to the state it was in before it stopped.
    VERIFY(is_stopped());
    VERIFY(m_stop_state != State::Invalid);
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (m_stop_state == Thread::State::Blocked) {
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocker || m_blocking_mutex) {
            // Hasn't been unblocked yet
            set_state(Thread::State::Blocked, 0);
        } else {
            // Was unblocked while stopped
            set_state(Thread::State::Runnable);
        }
    } else {
        set_state(m_stop_state, 0);
    }
}
2020-11-29 23:05:27 +00:00
DispatchSignalResult Thread : : dispatch_signal ( u8 signal )
2019-03-23 21:03:17 +00:00
{
2021-02-23 19:42:32 +00:00
VERIFY_INTERRUPTS_DISABLED ( ) ;
2021-08-29 18:10:24 +00:00
VERIFY ( g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
2022-07-21 21:08:07 +00:00
VERIFY ( signal > 0 & & signal < = NSIG ) ;
2021-02-23 19:42:32 +00:00
VERIFY ( process ( ) . is_user_process ( ) ) ;
VERIFY ( this = = Thread : : current ( ) ) ;
2019-03-23 21:03:17 +00:00
2021-03-09 21:35:13 +00:00
dbgln_if ( SIGNAL_DEBUG , " Dispatch signal {} to {}, state: {} " , signal , * this , state_string ( ) ) ;
2019-03-23 21:03:17 +00:00
2022-01-30 10:38:50 +00:00
if ( m_state = = Thread : : State : : Invalid | | ! is_initialized ( ) ) {
2020-09-07 14:31:00 +00:00
// Thread has barely been created, we need to wait until it is
// at least in Runnable state and is_initialized() returns true,
// which indicates that it is fully set up an we actually have
// a register state on the stack that we can modify
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Deferred ;
}
2022-02-24 18:55:49 +00:00
auto & action = m_process - > m_signal_action_data [ signal ] ;
2022-02-26 11:59:31 +00:00
auto sender_pid = m_signal_senders [ signal ] ;
2022-11-02 20:26:02 +00:00
auto sender = Process : : from_pid_ignoring_jails ( sender_pid ) ;
2019-03-23 21:03:17 +00:00
2022-02-26 20:50:17 +00:00
if ( ! current_trap ( ) & & ! action . handler_or_sigaction . is_null ( ) ) {
// We're trying dispatch a handled signal to a user process that was scheduled
// after a yielding/blocking kernel thread, we don't have a register capture of
// the thread, so just defer processing the signal to later.
return DispatchSignalResult : : Deferred ;
}
2019-03-23 21:03:17 +00:00
// Mark this signal as handled.
2019-08-01 09:00:36 +00:00
m_pending_signals & = ~ ( 1 < < ( signal - 1 ) ) ;
2021-11-06 21:06:08 +00:00
m_have_any_unmasked_pending_signals . store ( ( m_pending_signals & ~ m_signal_mask ) ! = 0 , AK : : memory_order_release ) ;
2019-03-23 21:03:17 +00:00
2020-12-09 04:18:45 +00:00
auto & process = this - > process ( ) ;
2021-10-31 22:36:52 +00:00
auto * tracer = process . tracer ( ) ;
2020-12-09 04:18:45 +00:00
if ( signal = = SIGSTOP | | ( tracer & & default_signal_action ( signal ) = = DefaultSignalAction : : DumpCore ) ) {
2021-03-09 21:35:13 +00:00
dbgln_if ( SIGNAL_DEBUG , " Signal {} stopping this thread " , signal ) ;
2022-01-30 10:38:50 +00:00
set_state ( Thread : : State : : Stopped , signal ) ;
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Yield ;
2019-03-23 21:03:17 +00:00
}
2020-12-08 04:29:41 +00:00
if ( signal = = SIGCONT ) {
2021-01-12 21:30:52 +00:00
dbgln ( " signal: SIGCONT resuming {} " , * this ) ;
2020-08-14 16:24:31 +00:00
} else {
2020-12-09 04:18:45 +00:00
if ( tracer ) {
2020-03-28 08:47:16 +00:00
// when a thread is traced, it should be stopped whenever it receives a signal
// the tracer is notified of this by using waitpid()
// only "pending signals" from the tracer are sent to the tracee
2020-12-09 04:18:45 +00:00
if ( ! tracer - > has_pending_signal ( signal ) ) {
2021-01-12 21:30:52 +00:00
dbgln ( " signal: {} stopping {} for tracer " , signal , * this ) ;
2022-01-30 10:38:50 +00:00
set_state ( Thread : : State : : Stopped , signal ) ;
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Yield ;
2020-03-28 08:47:16 +00:00
}
2020-12-09 04:18:45 +00:00
tracer - > unset_signal ( signal ) ;
2020-03-28 08:47:16 +00:00
}
2020-03-01 14:14:17 +00:00
}
2019-03-23 21:03:17 +00:00
2019-06-07 10:56:50 +00:00
auto handler_vaddr = action . handler_or_sigaction ;
if ( handler_vaddr . is_null ( ) ) {
2019-03-23 21:03:17 +00:00
switch ( default_signal_action ( signal ) ) {
case DefaultSignalAction : : Stop :
2022-01-30 10:38:50 +00:00
set_state ( Thread : : State : : Stopped , signal ) ;
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Yield ;
2019-08-06 17:43:07 +00:00
case DefaultSignalAction : : DumpCore :
2021-08-22 12:51:04 +00:00
process . set_should_generate_coredump ( true ) ;
2020-12-09 04:18:45 +00:00
process . for_each_thread ( [ ] ( auto & thread ) {
2019-08-06 17:43:07 +00:00
thread . set_dump_backtrace_on_finalization ( ) ;
} ) ;
2019-07-25 19:02:19 +00:00
[[fallthrough]] ;
2019-03-23 21:03:17 +00:00
case DefaultSignalAction : : Terminate :
2020-08-02 02:04:56 +00:00
m_process - > terminate_due_to_signal ( signal ) ;
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Terminate ;
2019-03-23 21:03:17 +00:00
case DefaultSignalAction : : Ignore :
2021-02-23 19:42:32 +00:00
VERIFY_NOT_REACHED ( ) ;
2019-03-23 21:03:17 +00:00
case DefaultSignalAction : : Continue :
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Continue ;
2019-03-23 21:03:17 +00:00
}
2021-02-23 19:42:32 +00:00
VERIFY_NOT_REACHED ( ) ;
2019-03-23 21:03:17 +00:00
}
2021-08-14 15:05:53 +00:00
if ( ( sighandler_t ) handler_vaddr . as_ptr ( ) = = SIG_IGN ) {
2021-03-09 21:35:13 +00:00
dbgln_if ( SIGNAL_DEBUG , " Ignored signal {} " , signal ) ;
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Continue ;
2019-03-23 21:03:17 +00:00
}
2021-09-06 15:22:36 +00:00
ScopedAddressSpaceSwitcher switcher ( m_process ) ;
2019-09-04 13:14:54 +00:00
2022-06-30 11:36:03 +00:00
m_currently_handled_signal = signal ;
2019-07-03 19:17:35 +00:00
u32 old_signal_mask = m_signal_mask ;
2022-02-24 18:55:49 +00:00
u32 new_signal_mask = m_signal_action_masks [ signal ] . value_or ( action . mask ) ;
2021-11-06 21:06:08 +00:00
if ( ( action . flags & SA_NODEFER ) = = SA_NODEFER )
2019-08-01 09:00:36 +00:00
new_signal_mask & = ~ ( 1 < < ( signal - 1 ) ) ;
2019-03-23 21:03:17 +00:00
else
2019-08-01 09:00:36 +00:00
new_signal_mask | = 1 < < ( signal - 1 ) ;
2019-03-23 21:03:17 +00:00
m_signal_mask | = new_signal_mask ;
2021-11-06 21:06:08 +00:00
m_have_any_unmasked_pending_signals . store ( ( m_pending_signals & ~ m_signal_mask ) ! = 0 , AK : : memory_order_release ) ;
2019-03-23 21:03:17 +00:00
2021-10-28 20:33:41 +00:00
bool use_alternative_stack = ( ( action . flags & SA_ONSTACK ) ! = 0 ) & & has_alternative_signal_stack ( ) & & ! is_in_alternative_signal_stack ( ) ;
2021-11-29 23:21:03 +00:00
auto setup_stack = [ & ] ( RegisterState & state ) - > ErrorOr < void > {
2021-10-28 20:33:41 +00:00
FlatPtr stack ;
if ( use_alternative_stack )
stack = m_alternative_signal_stack + m_alternative_signal_stack_size ;
else
2022-02-25 21:28:06 +00:00
stack = state . userspace_sp ( ) ;
dbgln_if ( SIGNAL_DEBUG , " Setting up user stack to return to IP {:p}, SP {:p} " , state . ip ( ) , state . userspace_sp ( ) ) ;
__ucontext ucontext {
. uc_link = nullptr ,
. uc_sigmask = old_signal_mask ,
. uc_stack = {
2022-02-26 11:59:31 +00:00
. ss_sp = bit_cast < void * > ( stack ) ,
. ss_flags = action . flags & SA_ONSTACK ,
. ss_size = use_alternative_stack ? m_alternative_signal_stack_size : 0 ,
2022-02-25 21:28:06 +00:00
} ,
. uc_mcontext = { } ,
} ;
copy_kernel_registers_into_ptrace_registers ( static_cast < PtraceRegisters & > ( ucontext . uc_mcontext ) , state ) ;
2022-02-26 11:59:31 +00:00
auto fill_signal_info_for_signal = [ & ] ( siginfo & signal_info ) {
if ( signal = = SIGCHLD ) {
if ( ! sender ) {
signal_info . si_code = CLD_EXITED ;
return ;
}
auto const * thread = sender - > thread_list ( ) . with ( [ ] ( auto & list ) { return list . is_empty ( ) ? nullptr : list . first ( ) ; } ) ;
if ( ! thread ) {
signal_info . si_code = CLD_EXITED ;
return ;
}
switch ( thread - > m_state ) {
case State : : Dead :
if ( sender - > should_generate_coredump ( ) & & sender - > is_dumpable ( ) ) {
signal_info . si_code = CLD_DUMPED ;
signal_info . si_status = sender - > termination_signal ( ) ;
return ;
}
[[fallthrough]] ;
case State : : Dying :
if ( sender - > termination_signal ( ) = = 0 ) {
signal_info . si_code = CLD_EXITED ;
signal_info . si_status = sender - > termination_status ( ) ;
return ;
}
signal_info . si_code = CLD_KILLED ;
signal_info . si_status = sender - > termination_signal ( ) ;
return ;
case State : : Runnable :
case State : : Running :
case State : : Blocked :
signal_info . si_code = CLD_CONTINUED ;
return ;
case State : : Stopped :
signal_info . si_code = CLD_STOPPED ;
return ;
case State : : Invalid :
// Something is wrong, but we're just an observer.
break ;
}
}
signal_info . si_code = SI_NOINFO ;
} ;
2022-02-25 21:28:06 +00:00
siginfo signal_info {
. si_signo = signal ,
2022-02-26 11:59:31 +00:00
// Filled in below by fill_signal_info_for_signal.
. si_code = 0 ,
// Set for SI_TIMER, we don't have the data here.
2022-02-25 21:28:06 +00:00
. si_errno = 0 ,
2022-02-26 11:59:31 +00:00
. si_pid = sender_pid . value ( ) ,
2022-08-20 22:21:01 +00:00
. si_uid = sender ? sender - > credentials ( ) - > uid ( ) . value ( ) : 0 ,
2022-02-26 11:59:31 +00:00
// Set for SIGILL, SIGFPE, SIGSEGV and SIGBUS
// FIXME: We don't generate these signals in a way that can be handled.
2022-02-25 21:28:06 +00:00
. si_addr = 0 ,
2022-02-26 11:59:31 +00:00
// Set for SIGCHLD.
2022-02-25 21:28:06 +00:00
. si_status = 0 ,
2022-02-26 11:59:31 +00:00
// Set for SIGPOLL, we don't have SIGPOLL.
2022-02-25 21:28:06 +00:00
. si_band = 0 ,
2022-02-26 11:59:31 +00:00
// Set for SI_QUEUE, SI_TIMER, SI_ASYNCIO and SI_MESGQ
// We do not generate any of these.
2022-02-25 21:28:06 +00:00
. si_value = {
. sival_int = 0 ,
} ,
} ;
2021-10-28 20:33:41 +00:00
2022-02-26 11:59:31 +00:00
if ( action . flags & SA_SIGINFO )
fill_signal_info_for_signal ( signal_info ) ;
2021-02-25 15:18:36 +00:00
# if ARCH(I386)
2022-02-25 21:28:06 +00:00
constexpr static FlatPtr thread_red_zone_size = 0 ;
# elif ARCH(X86_64)
constexpr static FlatPtr thread_red_zone_size = 128 ;
2022-10-16 14:58:44 +00:00
# elif ARCH(AARCH64)
constexpr static FlatPtr thread_red_zone_size = 0 ; // FIXME
TODO_AARCH64 ( ) ;
2021-06-23 19:54:41 +00:00
# else
2022-02-25 21:28:06 +00:00
# error Unknown architecture in dispatch_signal
# endif
2021-06-29 08:31:25 +00:00
// Align the stack to 16 bytes.
2022-02-25 16:23:28 +00:00
// Note that we push some elements on to the stack before the return address,
// so we need to account for this here.
2022-02-25 21:28:06 +00:00
constexpr static FlatPtr elements_pushed_on_stack_before_handler_address = 1 ; // one slot for a saved register
FlatPtr const extra_bytes_pushed_on_stack_before_handler_address = sizeof ( ucontext ) + sizeof ( signal_info ) ;
FlatPtr stack_alignment = ( stack - elements_pushed_on_stack_before_handler_address * sizeof ( FlatPtr ) + extra_bytes_pushed_on_stack_before_handler_address ) % 16 ;
// Also note that we have to skip the thread red-zone (if needed), so do that here.
stack - = thread_red_zone_size + stack_alignment ;
auto start_of_stack = stack ;
2021-02-25 15:18:36 +00:00
2022-02-25 21:28:06 +00:00
TRY ( push_value_on_user_stack ( stack , 0 ) ) ; // syscall return value slot
2019-11-04 08:29:47 +00:00
2022-02-25 21:28:06 +00:00
TRY ( copy_value_on_user_stack ( stack , ucontext ) ) ;
auto pointer_to_ucontext = stack ;
TRY ( copy_value_on_user_stack ( stack , signal_info ) ) ;
auto pointer_to_signal_info = stack ;
Kernel: Properly align stack for signal handlers
The System V ABI requires that the stack is 16-byte aligned on function
call. Confusingly, however, they mean that the stack must be aligned
this way **before** the `CALL` instruction is executed. That instruction
pushes the return value onto the stack, so the callee will actually see
the stack pointer as a value `sizeof(FlatPtr)` smaller.
The signal trampoline was written with this in mind, but `setup_stack`
aligned the entire stack, *including the return address* to a 16-byte
boundary. Because of this, the trampoline subtracted too much from the
stack pointer, thus misaligning it.
This was not a problem on i686 because we didn't execute any
instructions from signal handlers that would require memory operands to
be aligned to more than 4 bytes. This is not the case, however, on
x86_64, where SSE instructions are enabled by default and they require
16-byte aligned operands. Running such instructions raised a GP fault,
immediately killing the offending program with a SIGSEGV signal.
This issue caused TestKernelAlarm to fail in LibC when ran locally, and
at one point, the zsh port was affected too.
Fixes #9291
2021-10-24 15:34:59 +00:00
2022-02-25 16:23:28 +00:00
// Make sure we actually pushed as many elements as we claimed to have pushed.
2022-02-25 21:28:06 +00:00
if ( start_of_stack - stack ! = elements_pushed_on_stack_before_handler_address * sizeof ( FlatPtr ) + extra_bytes_pushed_on_stack_before_handler_address ) {
PANIC ( " Stack in invalid state after signal trampoline, expected {:x} but got {:x} " ,
start_of_stack - elements_pushed_on_stack_before_handler_address * sizeof ( FlatPtr ) - extra_bytes_pushed_on_stack_before_handler_address , stack ) ;
}
VERIFY ( stack % 16 = = 0 ) ;
2022-02-26 15:00:51 +00:00
# if ARCH(I386) || ARCH(X86_64)
// Save the FPU/SSE state
TRY ( copy_value_on_user_stack ( stack , fpu_state ( ) ) ) ;
# endif
2022-02-25 21:28:06 +00:00
# if ARCH(I386)
// Leave one empty slot to align the stack for a handler call.
TRY ( push_value_on_user_stack ( stack , 0 ) ) ;
# endif
TRY ( push_value_on_user_stack ( stack , pointer_to_ucontext ) ) ;
TRY ( push_value_on_user_stack ( stack , pointer_to_signal_info ) ) ;
TRY ( push_value_on_user_stack ( stack , signal ) ) ;
2022-02-25 16:23:28 +00:00
2022-02-25 21:28:06 +00:00
# if ARCH(I386)
2022-02-25 16:23:28 +00:00
VERIFY ( stack % 16 = = 0 ) ;
2022-02-25 21:28:06 +00:00
# endif
Kernel: Properly align stack for signal handlers
The System V ABI requires that the stack is 16-byte aligned on function
call. Confusingly, however, they mean that the stack must be aligned
this way **before** the `CALL` instruction is executed. That instruction
pushes the return value onto the stack, so the callee will actually see
the stack pointer as a value `sizeof(FlatPtr)` smaller.
The signal trampoline was written with this in mind, but `setup_stack`
aligned the entire stack, *including the return address* to a 16-byte
boundary. Because of this, the trampoline subtracted too much from the
stack pointer, thus misaligning it.
This was not a problem on i686 because we didn't execute any
instructions from signal handlers that would require memory operands to
be aligned to more than 4 bytes. This is not the case, however, on
x86_64, where SSE instructions are enabled by default and they require
16-byte aligned operands. Running such instructions raised a GP fault,
immediately killing the offending program with a SIGSEGV signal.
This issue caused TestKernelAlarm to fail in LibC when ran locally, and
at one point, the zsh port was affected too.
Fixes #9291
2021-10-24 15:34:59 +00:00
2022-02-25 21:28:06 +00:00
TRY ( push_value_on_user_stack ( stack , handler_vaddr . get ( ) ) ) ;
2019-11-04 08:29:47 +00:00
Kernel: Fix UB caused by taking a reference to a packed struct's member
Taking a reference or a pointer to a value that's not aligned properly
is undefined behavior. While `[[gnu::packed]]` ensures that reads from
and writes to fields of packed structs is a safe operation, the
information about the reduced alignment is lost when creating pointers
to these values.
Weirdly enough, GCC's undefined behavior sanitizer doesn't flag these,
even though the doc of `-Waddress-of-packed-member` says that it usually
leads to UB. In contrast, x86_64 Clang does flag these, which renders
the 64-bit kernel unable to boot.
For now, the `address-of-packed-member` warning will only be enabled in
the kernel, as it is absolutely crucial there because of KUBSAN, but
might get excessively noisy for the userland in the future.
Also note that we can't append to `CMAKE_CXX_FLAGS` like we do for other
flags in the kernel, because flags added via `add_compile_options` come
after these, so the `-Wno-address-of-packed-member` in the root would
cancel it out.
2021-08-01 18:30:43 +00:00
// We write back the adjusted stack value into the register state.
// We have to do this because we can't just pass around a reference to a packed field, as it's UB.
2021-08-19 19:53:53 +00:00
state . set_userspace_sp ( stack ) ;
2021-11-29 23:21:03 +00:00
return { } ;
2019-11-04 08:29:47 +00:00
} ;
// We now place the thread state on the userspace stack.
2020-08-02 18:08:22 +00:00
// Note that we use a RegisterState.
2020-02-15 23:15:37 +00:00
// Conversely, when the thread isn't blocking the RegisterState may not be
2019-11-04 08:29:47 +00:00
// valid (fork, exec etc) but the tss will, so we use that instead.
2020-08-02 18:08:22 +00:00
auto & regs = get_register_dump_from_stack ( ) ;
2021-11-29 23:21:03 +00:00
auto result = setup_stack ( regs ) ;
if ( result . is_error ( ) ) {
dbgln ( " Invalid stack pointer: {} " , regs . userspace_sp ( ) ) ;
process . set_should_generate_coredump ( true ) ;
process . for_each_thread ( [ ] ( auto & thread ) {
thread . set_dump_backtrace_on_finalization ( ) ;
} ) ;
m_process - > terminate_due_to_signal ( signal ) ;
return DispatchSignalResult : : Terminate ;
}
2021-06-26 12:56:28 +00:00
auto signal_trampoline_addr = process . signal_trampoline ( ) . get ( ) ;
2021-08-19 19:53:53 +00:00
regs . set_ip ( signal_trampoline_addr ) ;
2019-03-23 21:03:17 +00:00
2021-07-21 17:53:38 +00:00
dbgln_if ( SIGNAL_DEBUG , " Thread in state '{}' has been primed with signal handler {:#04x}:{:p} to deliver {} " , state_string ( ) , m_regs . cs , m_regs . ip ( ) , signal ) ;
2021-06-23 19:54:41 +00:00
2020-11-29 23:05:27 +00:00
return DispatchSignalResult : : Continue ;
2019-03-23 21:03:17 +00:00
}
2020-02-15 23:15:37 +00:00
RegisterState& Thread::get_register_dump_from_stack()
{
    // We should *always* have a trap frame here. If we don't, we're probably
    // a kernel thread that has never been preempted; supporting that would
    // require capturing the registers into m_regs and returning those instead.
    auto* trap = current_trap();
    VERIFY(trap);

    // Walk the trap chain to its innermost (most recent) frame.
    while (trap->next_trap != nullptr)
        trap = trap->next_trap;

    return *trap->regs;
}
2022-08-19 18:53:40 +00:00
ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_clone(Process& process)
{
    // Create a fresh thread in the target process, then copy over the
    // per-thread state that a clone must inherit from this thread.
    auto new_thread = TRY(Thread::try_create(process));
    new_thread->m_signal_mask = m_signal_mask;
    new_thread->m_fpu_state = m_fpu_state;
    new_thread->m_thread_specific_data = m_thread_specific_data;
    m_signal_action_masks.span().copy_to(new_thread->m_signal_action_masks);
    return new_thread;
}
2020-12-09 04:18:45 +00:00
void Thread : : set_state ( State new_state , u8 stop_signal )
2019-05-18 18:07:00 +00:00
{
2020-12-09 04:18:45 +00:00
State previous_state ;
2021-08-29 18:10:24 +00:00
VERIFY ( g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
2019-12-01 14:54:47 +00:00
if ( new_state = = m_state )
return ;
2020-12-09 04:18:45 +00:00
{
previous_state = m_state ;
2022-01-30 10:38:50 +00:00
if ( previous_state = = Thread : : State : : Invalid ) {
2020-12-09 04:18:45 +00:00
// If we were *just* created, we may have already pending signals
if ( has_unmasked_pending_signals ( ) ) {
2021-02-07 12:03:24 +00:00
dbgln_if ( THREAD_DEBUG , " Dispatch pending signals to new thread {} " , * this ) ;
2020-12-09 04:18:45 +00:00
dispatch_one_pending_signal ( ) ;
}
2020-09-07 14:31:00 +00:00
}
2020-12-09 04:18:45 +00:00
m_state = new_state ;
2021-02-07 12:03:24 +00:00
dbgln_if ( THREAD_DEBUG , " Set thread {} state to {} " , * this , state_string ( ) ) ;
2020-12-09 04:18:45 +00:00
}
2020-07-05 20:32:07 +00:00
2022-01-30 10:38:50 +00:00
if ( previous_state = = Thread : : State : : Runnable ) {
2021-01-22 23:56:08 +00:00
Scheduler : : dequeue_runnable_thread ( * this ) ;
2022-01-30 10:38:50 +00:00
} else if ( previous_state = = Thread : : State : : Stopped ) {
2020-11-29 23:05:27 +00:00
m_stop_state = State : : Invalid ;
2020-12-09 04:18:45 +00:00
auto & process = this - > process ( ) ;
2021-10-31 22:52:43 +00:00
if ( process . set_stopped ( false ) ) {
2020-12-09 04:18:45 +00:00
process . for_each_thread ( [ & ] ( auto & thread ) {
2021-05-16 09:36:52 +00:00
if ( & thread = = this )
return ;
if ( ! thread . is_stopped ( ) )
return ;
2021-02-07 12:03:24 +00:00
dbgln_if ( THREAD_DEBUG , " Resuming peer thread {} " , thread ) ;
2020-12-09 04:18:45 +00:00
thread . resume_from_stopped ( ) ;
} ) ;
process . unblock_waiters ( Thread : : WaitBlocker : : UnblockFlags : : Continued ) ;
2021-03-29 22:12:51 +00:00
// Tell the parent process (if any) about this change.
2022-11-02 20:26:02 +00:00
if ( auto parent = Process : : from_pid_ignoring_jails ( process . ppid ( ) ) ) {
2021-03-29 22:12:51 +00:00
[[maybe_unused]] auto result = parent - > send_signal ( SIGCHLD , & process ) ;
}
2020-12-09 04:18:45 +00:00
}
2020-11-29 23:05:27 +00:00
}
2022-01-30 10:38:50 +00:00
if ( m_state = = Thread : : State : : Runnable ) {
2021-08-08 12:19:55 +00:00
Scheduler : : enqueue_runnable_thread ( * this ) ;
2020-10-28 22:06:16 +00:00
Processor : : smp_wake_n_idle_processors ( 1 ) ;
2022-01-30 10:38:50 +00:00
} else if ( m_state = = Thread : : State : : Stopped ) {
2020-11-29 23:05:27 +00:00
// We don't want to restore to Running state, only Runnable!
2022-01-30 10:38:50 +00:00
m_stop_state = previous_state ! = Thread : : State : : Running ? previous_state : Thread : : State : : Runnable ;
2020-12-09 04:18:45 +00:00
auto & process = this - > process ( ) ;
2021-10-31 22:52:43 +00:00
if ( ! process . set_stopped ( true ) ) {
2020-12-09 04:18:45 +00:00
process . for_each_thread ( [ & ] ( auto & thread ) {
2021-05-16 09:36:52 +00:00
if ( & thread = = this )
return ;
if ( thread . is_stopped ( ) )
return ;
2021-02-07 12:03:24 +00:00
dbgln_if ( THREAD_DEBUG , " Stopping peer thread {} " , thread ) ;
2022-01-30 10:38:50 +00:00
thread . set_state ( Thread : : State : : Stopped , stop_signal ) ;
2020-12-09 04:18:45 +00:00
} ) ;
process . unblock_waiters ( Thread : : WaitBlocker : : UnblockFlags : : Stopped , stop_signal ) ;
2021-03-29 22:12:51 +00:00
// Tell the parent process (if any) about this change.
2022-11-02 20:26:02 +00:00
if ( auto parent = Process : : from_pid_ignoring_jails ( process . ppid ( ) ) ) {
2021-03-29 22:12:51 +00:00
[[maybe_unused]] auto result = parent - > send_signal ( SIGCHLD , & process ) ;
}
2020-12-09 04:18:45 +00:00
}
2022-01-30 10:38:50 +00:00
} else if ( m_state = = Thread : : State : : Dying ) {
VERIFY ( previous_state ! = Thread : : State : : Blocked ) ;
2020-08-06 01:13:28 +00:00
if ( this ! = Thread : : current ( ) & & is_finalizable ( ) ) {
// Some other thread set this thread to Dying, notify the
// finalizer right away as it can be cleaned up now
Scheduler : : notify_finalizer ( ) ;
}
2020-07-05 20:32:07 +00:00
}
2019-04-17 10:41:51 +00:00
}
2019-07-25 19:02:19 +00:00
2020-01-19 09:10:46 +00:00
struct RecognizedSymbol {
2021-02-25 15:18:36 +00:00
FlatPtr address ;
2022-04-01 17:58:27 +00:00
KernelSymbol const * symbol { nullptr } ;
2020-01-19 09:10:46 +00:00
} ;
2022-01-11 20:44:29 +00:00
// Append a human-readable line for one RecognizedSymbol to the builder.
// Kernel addresses are masked as 0xdeadc0de for non-superuser processes.
// Returns false when the address is null (caller stops the backtrace).
static ErrorOr<bool> symbolicate(RecognizedSymbol const& symbol, Process& process, StringBuilder& builder)
{
    if (symbol.address == 0)
        return false;

    auto credentials = process.credentials();
    bool const mask_kernel_addresses = !credentials->is_superuser();

    if (symbol.symbol != nullptr) {
        // A known kernel symbol: print "address  name + offset", unless the
        // address falls suspiciously far past the highest known symbol.
        unsigned offset = symbol.address - symbol.symbol->address;
        if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096)
            TRY(builder.try_appendff("{:p}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address)));
        else
            TRY(builder.try_appendff("{:p}  {} + {:#x}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address), symbol.symbol->name, offset));
        return true;
    }

    // No kernel symbol: either an unresolvable kernel address, or a
    // userspace address we describe via its containing memory region.
    if (!Memory::is_user_address(VirtualAddress(symbol.address))) {
        TRY(builder.try_append("0xdeadc0de\n"sv));
        return true;
    }

    TRY(process.address_space().with([&](auto& space) -> ErrorOr<void> {
        auto* region = space->find_region_containing({ VirtualAddress(symbol.address), sizeof(FlatPtr) });
        if (!region) {
            TRY(builder.try_appendff("{:p}\n", symbol.address));
            return {};
        }
        size_t offset = symbol.address - region->vaddr().get();
        if (auto region_name = region->name(); !region_name.is_null() && !region_name.is_empty())
            TRY(builder.try_appendff("{:p}  {} + {:#x}\n", (void*)symbol.address, region_name, offset));
        else
            TRY(builder.try_appendff("{:p}  {:p} + {:#x}\n", (void*)symbol.address, region->vaddr().as_ptr(), offset));
        return {};
    }));
    return true;
}
2022-01-11 20:44:29 +00:00
// Capture and symbolicate a stack trace for this thread, one frame per line.
ErrorOr<NonnullOwnPtr<KString>> Thread::backtrace()
{
    Vector<RecognizedSymbol, 128> frames;
    auto& process = const_cast<Process&>(this->process());
    auto stack_trace = TRY(Processor::capture_stack_trace(*this));
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());

    // Switch into the target process' address space so user frames resolve.
    ScopedAddressSpaceSwitcher switcher(process);
    for (auto& frame : stack_trace) {
        // Only kernel-range frames get a kernel symbol lookup.
        bool const is_user_frame = Memory::is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2);
        TRY(frames.try_append({ frame, is_user_frame ? nullptr : symbolicate_kernel_address(frame) }));
    }

    StringBuilder builder;
    for (auto& frame : frames) {
        // symbolicate() returns false on a null address, ending the trace.
        if (!TRY(symbolicate(frame, process, builder)))
            break;
    }
    return KString::try_create(builder.string_view());
}
2019-09-07 13:50:44 +00:00
2020-12-25 15:45:35 +00:00
// Alignment for the TLS region: at least what ThreadSpecificData needs,
// or the master TLS alignment if that is stricter.
size_t Thread::thread_specific_region_alignment() const
{
    return max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
}
// Size of the TLS region: the (aligned) master TLS image followed by the
// per-thread ThreadSpecificData header.
size_t Thread::thread_specific_region_size() const
{
    auto const aligned_tls_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment());
    return aligned_tls_size + sizeof(ThreadSpecificData);
}
2021-11-07 23:51:39 +00:00
// Allocate and initialize this thread's TLS region, copying the process'
// master TLS image into it and wiring up the self-pointing TCB.
ErrorOr<void> Thread::make_thread_specific_region(Badge<Process>)
{
    // The process may not require a TLS region, or allocate TLS later with sys$allocate_tls (which is what dynamically loaded programs do)
    if (!process().m_master_tls_region)
        return {};

    return process().address_space().with([&](auto& space) -> ErrorOr<void> {
        auto* region = TRY(space->allocate_region(Memory::RandomizeVirtualAddress::Yes, {}, thread_specific_region_size(), PAGE_SIZE, "Thread-specific"sv, PROT_READ | PROT_WRITE));

        m_thread_specific_range = region->range();

        SmapDisabler disabler;

        // The ThreadSpecificData header sits after the (aligned) TLS image;
        // the TLS storage itself lies immediately below it.
        auto const aligned_tls_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment());
        auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(aligned_tls_size).as_ptr();
        auto* thread_local_storage = ((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);

        m_thread_specific_data = VirtualAddress(thread_specific_data);
        // The TCB must point at itself (required by the TLS ABI).
        thread_specific_data->self = thread_specific_data;

        if (process().m_master_tls_size != 0)
            memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);

        return {};
    });
}
2019-10-13 12:36:55 +00:00
2022-08-19 18:53:40 +00:00
// Look up a live thread by its tid; returns nullptr if no such thread exists.
LockRefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    return Thread::all_instances().with([&](auto& list) -> LockRefPtr<Thread> {
        for (Thread& candidate : list) {
            if (candidate.tid() == tid)
                return candidate;
        }
        return nullptr;
    });
}
2020-02-16 00:27:42 +00:00
2020-02-18 12:44:27 +00:00
void Thread : : reset_fpu_state ( )
{
2021-08-22 13:35:54 +00:00
memcpy ( & m_fpu_state , & Processor : : clean_fpu_state ( ) , sizeof ( FPUState ) ) ;
2020-02-18 12:44:27 +00:00
}
2020-12-09 04:18:45 +00:00
bool Thread : : should_be_stopped ( ) const
2020-04-07 15:23:37 +00:00
{
2020-12-09 04:18:45 +00:00
return process ( ) . is_stopped ( ) ;
2020-04-07 15:23:37 +00:00
}
2021-09-07 09:40:31 +00:00
void Thread : : track_lock_acquire ( LockRank rank )
{
// Nothing to do for locks without a rank.
if ( rank = = LockRank : : None )
return ;
if ( m_lock_rank_mask ! = LockRank : : None ) {
// Verify we are only attempting to take a lock of a higher rank.
VERIFY ( m_lock_rank_mask > rank ) ;
}
m_lock_rank_mask | = rank ;
}
void Thread : : track_lock_release ( LockRank rank )
{
// Nothing to do for locks without a rank.
if ( rank = = LockRank : : None )
return ;
// The rank value from the caller should only contain a single bit, otherwise
// we are disabling the tracking for multiple locks at once which will corrupt
// the lock tracking mask, and we will assert somewhere else.
auto rank_is_a_single_bit = [ ] ( auto rank_enum ) - > bool {
2021-09-12 15:17:31 +00:00
auto rank = to_underlying ( rank_enum ) ;
2021-09-07 09:40:31 +00:00
auto rank_without_least_significant_bit = rank - 1 ;
return ( rank & rank_without_least_significant_bit ) = = 0 ;
} ;
// We can't release locks out of order, as that would violate the ranking.
// This is validated by toggling the least significant bit of the mask, and
// then bit wise or-ing the rank we are trying to release with the resulting
// mask. If the rank we are releasing is truly the highest rank then the mask
2022-08-10 03:32:36 +00:00
// we get back will be equal to the current mask stored on the thread.
2021-09-07 09:40:31 +00:00
auto rank_is_in_order = [ ] ( auto mask_enum , auto rank_enum ) - > bool {
2021-09-12 15:17:31 +00:00
auto mask = to_underlying ( mask_enum ) ;
auto rank = to_underlying ( rank_enum ) ;
2021-09-07 09:40:31 +00:00
auto mask_without_least_significant_bit = mask - 1 ;
return ( ( mask & mask_without_least_significant_bit ) | rank ) = = mask ;
} ;
VERIFY ( has_flag ( m_lock_rank_mask , rank ) ) ;
VERIFY ( rank_is_a_single_bit ( rank ) ) ;
VERIFY ( rank_is_in_order ( m_lock_rank_mask , rank ) ) ;
m_lock_rank_mask ^ = rank ;
}
2020-02-16 00:27:42 +00:00
}
2021-01-08 23:42:44 +00:00
2021-11-16 00:15:21 +00:00
// Format a thread as "process_name(pid:tid)".
ErrorOr<void> AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, Kernel::Thread const& value)
{
    auto const& process = value.process();
    return AK::Formatter<FormatString>::format(builder, "{}({}:{})"sv, process.name(), value.pid().value(), value.tid().value());
}