/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Demangle.h>
#include <AK/StringBuilder.h>
#include <AK/Time.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Profiling.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/signal_numbers.h>
#include <LibELF/Loader.h>

//#define SIGNAL_DEBUG
//#define THREAD_DEBUG

namespace Kernel {

Thread::Thread(NonnullRefPtr<Process> process)
    : m_process(move(process))
    , m_name(m_process->name())
{
    if (m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel) == 0) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }
#ifdef THREAD_DEBUG
    dbg() << "Created new thread " << m_process->name() << "(" << m_process->pid().value() << ":" << m_tid.value() << ")";
#endif
    set_default_signal_dispositions();
    m_fpu_state = (FPUState*)kmalloc_aligned<16>(sizeof(FPUState));
    reset_fpu_state();
    memset(&m_tss, 0, sizeof(m_tss));
    m_tss.iomapbase = sizeof(TSS32);

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;

    if (m_process->is_kernel_process()) {
        m_tss.cs = GDT_SELECTOR_CODE0;
        m_tss.ds = GDT_SELECTOR_DATA0;
        m_tss.es = GDT_SELECTOR_DATA0;
        m_tss.fs = GDT_SELECTOR_PROC;
        m_tss.ss = GDT_SELECTOR_DATA0;
        m_tss.gs = 0;
    } else {
        m_tss.cs = GDT_SELECTOR_CODE3 | 3;
        m_tss.ds = GDT_SELECTOR_DATA3 | 3;
        m_tss.es = GDT_SELECTOR_DATA3 | 3;
        m_tss.fs = GDT_SELECTOR_DATA3 | 3;
        m_tss.ss = GDT_SELECTOR_DATA3 | 3;
        m_tss.gs = GDT_SELECTOR_TLS | 3;
    }

    m_tss.cr3 = m_process->page_directory().cr3();

    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid.value()), Region::Access::Read | Region::Access::Write, false, true);
    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process->is_kernel_process()) {
        m_tss.esp = m_tss.esp0 = m_kernel_stack_top;
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
        m_tss.ss0 = GDT_SELECTOR_DATA0;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();

    if (m_process->pid() != 0)
        Scheduler::init_thread(*this);
}

Thread::~Thread()
{
    {
        // We need to explicitly remove ourselves from the thread list
        // here. We may get pre-empted in the middle of destructing this
        // thread, which causes problems if the thread list is iterated.
        // Specifically, if this is the last thread of a process, checking
        // block conditions would access m_process, which would be in
        // the middle of being destroyed.
        ScopedSpinLock lock(g_scheduler_lock);
        g_scheduler_data->thread_list_for_state(m_state).remove(*this);
    }

    ASSERT(!m_joiner);
}

void Thread::unblock()
{
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(m_lock.own_lock());
    m_blocker = nullptr;
    if (Thread::current() == this) {
        set_state(Thread::Running);
        return;
    }
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}

// While executing in the kernel, a thread can acquire various resources that
// need cleanup, such as locks and references to RefCounted objects. This
// cleanup normally happens on the exit path, for example in destructors for
// various RAII guards. Those exit paths are not taken when a thread that is
// executing in the kernel (e.g. blocked on a read, or sleeping) is killed
// directly, which leaks those resources.
//
// Therefore, instead of killing a thread directly, callers are supposed to
// call thread->set_should_die(), which unblocks the thread and makes it
// unwind its kernel stack if it is blocked in the kernel. Just before
// returning to userspace, the thread then dies via die_if_needed().
void Thread::set_should_die()
{
    if (m_should_die) {
#ifdef THREAD_DEBUG
        dbg() << *this << " Should already die";
#endif
        return;
    }

    ScopedCritical critical;

    // Remember that we should die instead of returning to userspace.
    {
        ScopedSpinLock lock(g_scheduler_lock);
        m_should_die = true;

        // NOTE: Even the current thread can technically be in "Stopped"
        // state! This is the case when another thread sent a SIGSTOP to
        // it while it was running and it calls e.g. exit() before
        // the scheduler gets involved again.
        if (is_stopped()) {
            // If we were stopped, we need to briefly resume so that
            // the kernel stacks can clean up. We won't ever return back
            // to user mode, though.
            resume_from_stopped();
        } else if (state() == Queued) {
            // m_queue can only be accessed safely if g_scheduler_lock is held!
            if (m_queue) {
                m_queue->dequeue(*this);
                m_queue = nullptr;
                // Wake the thread
                wake_from_queue();
            }
        }
    }

    if (is_blocked()) {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_blocker != nullptr);
        // We're blocked in the kernel.
        m_blocker->set_interrupted_by_death();
        unblock();
    }
}

void Thread::die_if_needed()
{
    ASSERT(Thread::current() == this);

    if (!m_should_die)
        return;

    unlock_process_if_locked();

    ScopedCritical critical;
    set_should_die();

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch.
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch.
    u32 prev_flags;
    Processor::current().clear_critical(prev_flags, false);

    dbg() << "die_if_needed returned from clear_critical!!! in irq: " << Processor::current().in_irq();

    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again.
    ASSERT_NOT_REACHED();
}
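
// A rough usage sketch of the protocol documented above set_should_die()
// (the "victim" name is hypothetical, not something used in this file):
// the killer only flags the thread; the thread itself unwinds its kernel
// stack and dies on its way back to userspace.
//
//     victim->set_should_die();           // unblocks the victim if it was blocked
//     ...
//     // In the victim, on the return-to-userspace path:
//     Thread::current()->die_if_needed(); // does not return once the flag is set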

void Thread::yield_without_holding_big_lock()
{
    bool did_unlock = unlock_process_if_locked();
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    Scheduler::yield();
    relock_process(did_unlock);
}

bool Thread::unlock_process_if_locked()
{
    return process().big_lock().force_unlock_if_locked();
}

void Thread::relock_process(bool did_unlock)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::donate_to or Scheduler::yield
    // above. We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);

    if (did_unlock) {
        // We've unblocked, relock the process if needed and carry on.
        process().big_lock().lock();
    }

    // NOTE: We may be on a different CPU now!
    Processor::current().restore_critical(prev_crit, prev_flags);
}

auto Thread::sleep(const timespec& duration, timespec* remaining_time) -> BlockResult
{
    ASSERT(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>(nullptr, Thread::BlockTimeout(false, &duration), remaining_time);
}

auto Thread::sleep_until(const timespec& deadline) -> BlockResult
{
    ASSERT(state() == Thread::Running);
    return Thread::current()->block<Thread::SleepBlocker>(nullptr, Thread::BlockTimeout(true, &deadline));
}

const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Queued:
        return "Queued";
    case Thread::Blocked: {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_blocker != nullptr);
        return m_blocker->state_string();
    }
    }
    klog() << "Thread::state_string(): Invalid state: " << state();
    ASSERT_NOT_REACHED();
    return nullptr;
}

void Thread::finalize()
{
    ASSERT(Thread::current() == g_finalizer);
    ASSERT(Thread::current() != this);

    ASSERT(!m_lock.own_lock());
    {
        ScopedSpinLock lock(g_scheduler_lock);
#ifdef THREAD_DEBUG
        dbg() << "Finalizing thread " << *this;
#endif
        set_state(Thread::State::Dead);

        if (auto* joiner = m_joiner.exchange(nullptr, AK::memory_order_acq_rel)) {
            // Notify joiner that we exited
            static_cast<JoinBlocker*>(joiner->m_blocker)->joinee_exited(m_exit_value);
        }
    }

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();

    kfree_aligned(m_fpu_state);

    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
    ASSERT(thread_cnt_before != 0);
    if (thread_cnt_before == 1)
        process().finalize();
}

void Thread::finalize_dying_threads()
{
    ASSERT(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        ScopedSpinLock lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (thread.is_finalizable())
                dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads) {
        thread->finalize();

        // This thread will never execute again, drop the running reference
        // NOTE: This may not necessarily drop the last reference if anything
        // else is still holding onto this thread!
        thread->unref();
    }
}

bool Thread::tick()
{
    ++m_ticks;
    if (tss().cs & 3)
        ++m_process->m_ticks_in_user;
    else
        ++m_process->m_ticks_in_kernel;
    return --m_ticks_left;
}

bool Thread::has_pending_signal(u8 signal) const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_pending_signals & (1 << (signal - 1));
}

u32 Thread::pending_signals() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_pending_signals;
}

void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    ASSERT(signal < 32);
    ScopedSpinLock lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
#ifdef SIGNAL_DEBUG
        dbg() << "Signal " << signal << " was ignored by " << process();
#endif
        return;
    }

#ifdef SIGNAL_DEBUG
    if (sender)
        dbg() << "Signal: " << *sender << " sent " << signal << " to " << process();
    else
        dbg() << "Signal: Kernel sent " << signal << " to " << process();
#endif

    m_pending_signals |= 1 << (signal - 1);
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
}

u32 Thread::update_signal_mask(u32 signal_mask)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

u32 Thread::signal_mask() const
{
    ScopedSpinLock lock(g_scheduler_lock);
    return m_signal_mask;
}

u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    if (block)
        m_signal_mask &= ~signal_set;
    else
        m_signal_mask |= signal_set;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}

void Thread::clear_signals()
{
    ScopedSpinLock lock(g_scheduler_lock);
    m_signal_mask = 0;
    m_pending_signals = 0;
    m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    ASSERT(Thread::current() == this);
    ScopedSpinLock lock(g_scheduler_lock);
    if (dispatch_signal(signal) == ShouldUnblockThread::No)
        Scheduler::yield();
}

ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
    ASSERT(m_lock.own_lock());
    u32 signal_candidates = m_pending_signals & ~m_signal_mask;
    ASSERT(signal_candidates);

    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

static DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);
    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}

bool Thread::should_ignore_signal(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
        return true;
    return false;
}

bool Thread::has_signal_handler(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}

static bool push_value_on_user_stack(u32* stack, u32 data)
{
    *stack -= 4;
    return copy_to_user((u32*)*stack, &data);
}

void Thread::resume_from_stopped()
{
    ASSERT(is_stopped());
    ASSERT(m_stop_state != State::Invalid);
    ASSERT(g_scheduler_lock.own_lock());
    set_state(m_stop_state);
    m_stop_state = State::Invalid;
    // make sure SemiPermanentBlocker is unblocked
    if (m_state != Thread::Runnable && m_state != Thread::Running) {
        ScopedSpinLock lock(m_lock);
        if (m_blocker && m_blocker->is_reason_signal())
            unblock();
    }
}

ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(process().is_user_process());

#ifdef SIGNAL_DEBUG
    klog() << "signal: dispatch signal " << signal << " to " << *this;
#endif

    if (m_state == Invalid || !is_initialized()) {
        // Thread has barely been created, we need to wait until it is
        // at least in Runnable state and is_initialized() returns true,
        // which indicates that it is fully set up and we actually have
        // a register state on the stack that we can modify.
        return ShouldUnblockThread::No;
    }

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    if (signal == SIGSTOP) {
        if (!is_stopped()) {
            m_stop_signal = SIGSTOP;
            set_state(State::Stopped);
        }
        return ShouldUnblockThread::No;
    }

    if (signal == SIGCONT && is_stopped()) {
        resume_from_stopped();
    } else {
        auto* thread_tracer = tracer();
        if (thread_tracer != nullptr) {
            // When a thread is traced, it should be stopped whenever it receives a signal.
            // The tracer is notified of this by using waitpid().
            // Only "pending signals" from the tracer are sent to the tracee.
            if (!thread_tracer->has_pending_signal(signal)) {
                m_stop_signal = signal;
                // make sure SemiPermanentBlocker is unblocked
                ScopedSpinLock lock(m_lock);
                if (m_blocker && m_blocker->is_reason_signal())
                    unblock();
                set_state(Stopped);
                return ShouldUnblockThread::No;
            }
            thread_tracer->unset_signal(signal);
        }
    }

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            m_stop_signal = signal;
            set_state(Stopped);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::DumpCore:
            process().for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process->terminate_due_to_signal(signal);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::Ignore:
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return ShouldUnblockThread::Yes;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        klog() << "signal: " << *this << " ignored signal " << signal;
#endif
        return ShouldUnblockThread::Yes;
    }

    ProcessPagingScope paging_scope(m_process);

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto setup_stack = [&](RegisterState& state) {
        u32* stack = &state.userspace_esp;
        u32 old_esp = *stack;
        u32 ret_eip = state.eip;
        u32 ret_eflags = state.eflags;

#ifdef SIGNAL_DEBUG
        klog() << "signal: setting up user stack to return to eip: " << String::format("%p", ret_eip) << " esp: " << String::format("%p", old_esp);
#endif

        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) onto the stack,
        // so we need to account for this here.
        u32 stack_alignment = (*stack - 56) % 16;
        *stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_eflags);

        push_value_on_user_stack(stack, ret_eip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_esp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); // push fake return address

        ASSERT((*stack % 16) == 0);
    };
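
    // For reference, a sketch of the user stack frame that setup_stack builds
    // (highest address first; 14 * 4 = 56 bytes of pushes plus alignment padding):
    //
    //     ... original user stack ...
    //     [alignment padding]
    //     ret_eflags
    //     ret_eip
    //     eax, ecx, edx, ebx, old_esp, ebp, esi, edi
    //     old_signal_mask
    //     signal
    //     handler address
    //     0 (fake return address)    <- userspace_esp, 16-byte aligned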

    // We now place the thread state on the userspace stack.
    // Note that we use a RegisterState.
    // Conversely, when the thread isn't blocking, the RegisterState may not be
    // valid (fork, exec etc.) but the TSS will be, so we use that instead.
    auto& regs = get_register_dump_from_stack();
    setup_stack(regs);
    regs.eip = g_return_to_ring3_from_signal_trampoline.get();

#ifdef SIGNAL_DEBUG
    klog() << "signal: Okay, " << *this << " {" << state_string() << "} has been primed with signal handler " << String::format("%w", m_tss.cs) << ":" << String::format("%x", m_tss.eip) << " to deliver " << signal;
#endif
    return ShouldUnblockThread::Yes;
}

void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress(SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress(SIG_IGN);
}

bool Thread::push_value_on_stack(FlatPtr value)
{
    m_tss.esp -= 4;
    FlatPtr* stack_ptr = (FlatPtr*)m_tss.esp;
    return copy_to_user(stack_ptr, &value);
}

RegisterState& Thread::get_register_dump_from_stack()
{
    return *(RegisterState*)(kernel_stack_top() - sizeof(RegisterState));
}

KResultOr<u32> Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment, Vector<AuxiliaryValue> auxiliary_values)
{
    auto* region = m_process->allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    if (!region)
        return KResult(-ENOMEM);
    region->set_stack(true);

    FlatPtr new_esp = region->vaddr().offset(default_userspace_stack_size).get();

    auto push_on_new_stack = [&new_esp](u32 value) {
        new_esp -= 4;
        Userspace<u32*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, &value);
    };

    auto push_aux_value_on_new_stack = [&new_esp](auxv_t value) {
        new_esp -= sizeof(auxv_t);
        Userspace<auxv_t*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, &value);
    };

    auto push_string_on_new_stack = [&new_esp](const String& string) {
        new_esp -= round_up_to_power_of_two(string.length() + 1, 4);
        Userspace<u32*> stack_ptr = new_esp;
        return copy_to_user(stack_ptr, string.characters(), string.length() + 1);
    };

    Vector<FlatPtr> argv_entries;
    for (auto& argument : arguments) {
        push_string_on_new_stack(argument);
        argv_entries.append(new_esp);
    }

    Vector<FlatPtr> env_entries;
    for (auto& variable : environment) {
        push_string_on_new_stack(variable);
        env_entries.append(new_esp);
    }

    for (auto& value : auxiliary_values) {
        if (!value.optional_string.is_empty()) {
            push_string_on_new_stack(value.optional_string);
            value.auxv.a_un.a_ptr = (void*)new_esp;
        }
    }

    for (ssize_t i = auxiliary_values.size() - 1; i >= 0; --i) {
        auto& value = auxiliary_values[i];
        push_aux_value_on_new_stack(value.auxv);
    }

    push_on_new_stack(0);
    for (ssize_t i = env_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(env_entries[i]);
    FlatPtr envp = new_esp;

    push_on_new_stack(0);
    for (ssize_t i = argv_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(argv_entries[i]);
    FlatPtr argv = new_esp;

    // NOTE: The stack needs to be 16-byte aligned.
    new_esp -= new_esp % 16;

    push_on_new_stack((FlatPtr)envp);
    push_on_new_stack((FlatPtr)argv);
    push_on_new_stack((FlatPtr)argv_entries.size());
    push_on_new_stack(0);

    return new_esp;
}
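
// For reference, a rough sketch of the main-thread stack produced by the
// function above, from higher to lower addresses. This simply mirrors the
// push order in the code; it is not an authoritative ABI description:
//
//     argument, environment and auxiliary strings
//     auxv entries (auxv_t), pushed in reverse order
//     0, envp[n-1] ... envp[0]          <- envp points at envp[0]
//     0, argv[n-1] ... argv[0]          <- argv points at argv[0]
//     [padding to 16-byte alignment]
//     envp
//     argv
//     argc (argv_entries.size())
//     0                                 <- new_esp (the returned value)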

RefPtr<Thread> Thread::clone(Process& process)
{
    auto clone = adopt(*new Thread(process));
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_thread_specific_data = m_thread_specific_data;
    clone->m_thread_specific_region_size = m_thread_specific_region_size;
    return clone;
}

void Thread::set_state(State new_state)
{
    ASSERT(g_scheduler_lock.own_lock());
    if (new_state == m_state)
        return;

    if (new_state == Blocked) {
        // We should always have a Blocker while blocked.
        ASSERT(m_blocker != nullptr);
    }

    auto previous_state = m_state;
    if (previous_state == Invalid) {
        // If we were *just* created, we may already have pending signals.
        ScopedSpinLock thread_lock(m_lock);
        if (has_unmasked_pending_signals()) {
            dbg() << "Dispatch pending signals to new thread " << *this;
            dispatch_one_pending_signal();
        }
    }

    if (new_state == Stopped) {
        // We don't want to restore to Running state, only Runnable!
        m_stop_state = m_state != Running ? m_state : Runnable;
    }

    m_state = new_state;
#ifdef THREAD_DEBUG
    dbg() << "Set Thread " << *this << " state to " << state_string();
#endif

    if (m_process->pid() != 0) {
        update_state_for_thread(previous_state);
        ASSERT(g_scheduler_data->has_thread(*this));
    }

    if (m_state == Dying) {
        ASSERT(previous_state != Queued);
        if (this != Thread::current() && is_finalizable()) {
            // Some other thread set this thread to Dying, notify the
            // finalizer right away as it can be cleaned up now.
            Scheduler::notify_finalizer();
        }
    }
}

void Thread::update_state_for_thread(Thread::State previous_state)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_data);
    ASSERT(g_scheduler_lock.own_lock());
    auto& previous_list = g_scheduler_data->thread_list_for_state(previous_state);
    auto& list = g_scheduler_data->thread_list_for_state(state());

    if (&previous_list != &list) {
        previous_list.remove(*this);
    }

    if (list.contains(*this))
        return;

    list.append(*this);
}

String Thread::backtrace()
{
    return backtrace_impl();
}

struct RecognizedSymbol {
    u32 address;
    const KernelSymbol* symbol { nullptr };
};

static bool symbolicate(const RecognizedSymbol& symbol, const Process& process, StringBuilder& builder, Process::ELFBundle* elf_bundle)
{
    if (!symbol.address)
        return false;

    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.symbol) {
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            if (elf_bundle && elf_bundle->elf_loader->has_symbols())
                builder.appendf("%p %s\n", symbol.address, elf_bundle->elf_loader->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096) {
        builder.appendf("%p\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address);
    } else {
        builder.appendf("%p %s +%u\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address, demangle(symbol.symbol->name).characters(), offset);
    }
    return true;
}

String Thread::backtrace_impl()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    auto& process = const_cast<Process&>(this->process());
    OwnPtr<Process::ELFBundle> elf_bundle;
    if (!Processor::current().in_irq()) {
        // If we're handling IRQs we can't really safely symbolicate
        elf_bundle = process.elf_bundle();
    }
    ProcessPagingScope paging_scope(process);

    // To prevent a context switch involving this thread, which may happen
    // on another processor, we need to acquire the scheduler lock while
    // walking the stack
    {
        ScopedSpinLock lock(g_scheduler_lock);
        FlatPtr stack_ptr, eip;
        if (Processor::get_context_frame_ptr(*this, stack_ptr, eip)) {
            recognized_symbols.append({ eip, symbolicate_kernel_address(eip) });
            while (stack_ptr) {
                FlatPtr retaddr;

                if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
                    if (!copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]))
                        break;
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    if (!copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr))
                        break;
                } else {
                    void* fault_at;
                    if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at))
                        break;
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
                        break;
                }
            }
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!symbolicate(symbol, process, builder, elf_bundle.ptr()))
            break;
    }
    return builder.to_string();
}

Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp, FlatPtr eip) const
{
    InterruptDisabler disabler;
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);
    Vector<FlatPtr, Profiling::max_stack_frame_count> backtrace;
    backtrace.append(eip);
    FlatPtr stack_ptr_copy;
    FlatPtr stack_ptr = (FlatPtr)ebp;
    while (stack_ptr) {
        void* fault_at;
        if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
            break;
        FlatPtr retaddr;
        if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
            break;
        backtrace.append(retaddr);
        if (backtrace.size() == Profiling::max_stack_frame_count)
            break;
        stack_ptr = stack_ptr_copy;
    }
    return backtrace;
}
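
// Both stack walkers above rely on the conventional i386 frame layout,
// sketched here (this assumes frames are linked through ebp, which is not
// guaranteed for code compiled with frame pointers omitted):
//
//     [ebp + 4]  return address into the caller
//     [ebp + 0]  caller's saved ebp (next frame pointer; 0 terminates the walk)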

KResult Thread::make_thread_specific_region(Badge<Process>)
{
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    m_thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, m_thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    if (!region)
        return KResult(-ENOMEM);

    SmapDisabler disabler;
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);
    return KSuccess;
}

const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid().value() << ":" << value.tid().value() << ")";
}

Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const BlockTimeout& timeout, Atomic<bool>* lock, RefPtr<Thread> beneficiary)
{
    auto* current_thread = Thread::current();
    RefPtr<Timer> timer;
    bool block_finished = false;
    bool did_timeout = false;
    bool did_unlock;

    {
        ScopedCritical critical;
        // We need to be in a critical section *and* then also acquire the
        // scheduler lock. The only way acquiring the scheduler lock could
        // block us is if another core were to be holding it, in which case
        // we need to wait until the scheduler lock is released again.
        {
            ScopedSpinLock sched_lock(g_scheduler_lock);
            if (!timeout.is_infinite()) {
                timer = TimerQueue::the().add_timer_without_id(timeout.absolute_time(), [&]() {
                    // NOTE: this may execute on the same or any other processor!
                    ScopedSpinLock lock(g_scheduler_lock);
                    if (!block_finished) {
                        did_timeout = true;
                        wake_from_queue();
                    }
                });
                if (!timer) {
                    dbg() << "wait_on timed out before blocking";
                    // We timed out already, don't block.
                    // The API contract guarantees we return with interrupts enabled,
                    // regardless of how we got called.
                    critical.set_interrupt_flag_on_destruction(true);
                    return BlockResult::InterruptedByTimeout;
                }
            }

            // m_queue can only be accessed safely if g_scheduler_lock is held!
            m_queue = &queue;
            if (!queue.enqueue(*current_thread)) {
                // The WaitQueue was already requested to wake someone when
                // nobody was waiting. So return right away as we shouldn't
                // be waiting.
                // The API contract guarantees we return with interrupts enabled,
                // regardless of how we got called.
                critical.set_interrupt_flag_on_destruction(true);
                return BlockResult::NotBlocked;
            }

            did_unlock = unlock_process_if_locked();
            if (lock)
                *lock = false;
            set_state(State::Queued);
            m_wait_reason = reason;

            // Yield and wait for the queue to wake us up again.
            if (beneficiary)
                Scheduler::donate_to(beneficiary, reason);
            else
                Scheduler::yield();
        }

        // We've unblocked, relock the process if needed and carry on.
        relock_process(did_unlock);

        // This looks counterproductive, but we may not actually leave
        // the critical section we just restored. It depends on whether
        // we were in one while being called.
        if (current_thread->should_die()) {
            // We're being unblocked so that we can clean up. We shouldn't
            // be in Dying state until we're about to return back to user mode.
            ASSERT(current_thread->state() == Thread::Running);
#ifdef THREAD_DEBUG
            dbg() << "Dying thread " << *current_thread << " was unblocked";
#endif
        }
    }

    BlockResult result(BlockResult::WokeNormally);
    {
        // To be able to look at m_wait_queue_node we once again need the
        // scheduler lock, which is held when we insert into the queue.
        ScopedSpinLock sched_lock(g_scheduler_lock);
        block_finished = true;

        if (m_queue) {
            ASSERT(m_queue == &queue);
            // If our thread was still in the queue, we timed out.
            m_queue = nullptr;
            if (queue.dequeue(*current_thread))
                result = BlockResult::InterruptedByTimeout;
        } else {
            // Our thread was already removed from the queue. The only
            // way this can happen is if someone else is trying to kill us.
            // In this case, the queue should not contain us anymore.
            result = BlockResult::InterruptedByDeath;
        }
    }

    if (timer && !did_timeout) {
        // Cancel the timer while not holding any locks. This allows
        // the timer function to complete before we remove it
        // (e.g. if it's on another processor).
        TimerQueue::the().cancel_timer(timer.release_nonnull());
    }

    // The API contract guarantees we return with interrupts enabled,
    // regardless of how we got called.
    sti();

    return result;
}
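
// A minimal usage sketch for the wait/wake pair above. The names "some_queue"
// and "some_timeout" are hypothetical, default arguments for the trailing
// parameters are assumed, and the waker side is assumed to signal the queue
// (e.g. via WaitQueue::wake_one() or WaitQueue::wake_all(), which end up
// waking a queued thread through wake_from_queue()):
//
//     // Waiting side:
//     auto result = Thread::current()->wait_on(some_queue, "Example", some_timeout);
//     if (result == Thread::BlockResult::InterruptedByTimeout) {
//         // the timeout fired before anyone woke us
//     }
//
//     // Waking side (another thread, or an interrupt handler):
//     some_queue.wake_one();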

void Thread::wake_from_queue()
{
    ScopedSpinLock lock(g_scheduler_lock);
    ASSERT(state() == State::Queued);
    m_wait_reason = nullptr;
    if (this != Thread::current())
        set_state(State::Runnable);
    else
        set_state(State::Running);
}

RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    RefPtr<Thread> found_thread;
    ScopedSpinLock lock(g_scheduler_lock);
    Thread::for_each([&](auto& thread) {
        if (thread.tid() == tid) {
            found_thread = &thread;
            return IterationDecision::Break;
        }
        return IterationDecision::Continue;
    });
    return found_thread;
}

void Thread::reset_fpu_state()
{
    memcpy(m_fpu_state, &Processor::current().clean_fpu_state(), sizeof(FPUState));
}

void Thread::start_tracing_from(ProcessID tracer)
{
    m_tracer = ThreadTracer::create(tracer);
}

void Thread::stop_tracing()
{
    m_tracer = nullptr;
}

void Thread::tracer_trap(const RegisterState& regs)
{
    ASSERT(m_tracer.ptr());
    m_tracer->set_regs(regs);
    send_urgent_signal_to_self(SIGTRAP);
}

const Thread::Blocker& Thread::blocker() const
{
    ASSERT(m_lock.own_lock());
    ASSERT(m_blocker);
    return *m_blocker;
}

}