#include <AK/ELF/ELFLoader.h>
#include <AK/StringBuilder.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <LibC/signal_numbers.h>

//#define SIGNAL_DEBUG
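
// Userspace reaches its thread-local data through the gs segment register.
// All threads share a single GDT entry for this; the entry's base is expected
// to be re-pointed at the incoming thread's thread-specific data on every
// context switch, so lazily allocating the selector once (below) is enough.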

u16 thread_specific_selector()
{
    static u16 selector;
    if (!selector) {
        selector = gdt_alloc_entry();
        auto& descriptor = get_gdt_entry(selector);
        descriptor.dpl = 3;             // Accessible from ring 3.
        descriptor.segment_present = 1;
        descriptor.granularity = 0;     // Byte granularity.
        descriptor.zero = 0;
        descriptor.operation_size = 1;  // 32-bit segment.
        descriptor.descriptor_type = 1; // Code/data segment.
        descriptor.type = 2;            // Writable data segment.
    }
    return selector;
}

Descriptor& thread_specific_descriptor()
{
    return get_gdt_entry(thread_specific_selector());
}

HashTable<Thread*>& thread_table()
{
    ASSERT_INTERRUPTS_DISABLED();
    static HashTable<Thread*>* table;
    if (!table)
        table = new HashTable<Thread*>;
    return *table;
}
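
// NOTE: The selector values used below assume this kernel's GDT layout:
// 0x08/0x10 are the ring 0 code/data entries, and 0x1b/0x23 are the ring 3
// code/data entries (GDT index with RPL 3 or'ed in).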

Thread::Thread(Process& process)
    : m_process(process)
    , m_tid(process.m_next_tid++)
{
    dbgprintf("Thread{%p}: New thread TID=%u in %s(%u)\n", this, m_tid, process.name().characters(), process.pid());
    set_default_signal_dispositions();
    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
    memset(m_fpu_state, 0, sizeof(FPUState));
    memset(&m_tss, 0, sizeof(m_tss));

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;
    u16 cs, ds, ss, gs;

    if (m_process.is_ring0()) {
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
        gs = 0;
    } else {
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
        gs = thread_specific_selector() | 3;
    }

    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = gs;
    m_tss.ss = ss;
    m_tss.cs = cs;

    m_tss.cr3 = m_process.page_directory().cr3();

    if (m_process.is_ring0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel process termination, so I guess it's not technically leaked...
        m_kernel_stack_base = (u32)kmalloc_eternal(default_kernel_stack_size);
        m_kernel_stack_top = (m_kernel_stack_base + default_kernel_stack_size) & 0xfffffff8u;
        m_tss.esp = m_kernel_stack_top;
    } else {
        // Ring 3 processes need a separate stack for ring 0.
        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid));
        m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
        m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_process.pid();
    m_far_ptr.offset = 0x98765432;

    if (m_process.pid() != 0) {
        InterruptDisabler disabler;
        thread_table().set(this);
        Scheduler::init_thread(*this);
    }
}

Thread::~Thread()
{
    dbgprintf("~Thread{%p}\n", this);
    kfree_aligned(m_fpu_state);

    {
        InterruptDisabler disabler;
        thread_table().remove(this);
    }

    if (g_last_fpu_thread == this)
        g_last_fpu_thread = nullptr;

    if (selector())
        gdt_free_entry(selector());

    if (m_userspace_stack_region)
        m_process.deallocate_region(*m_userspace_stack_region);
}

void Thread::unblock()
{
    if (current == this) {
        set_state(Thread::Running);
        return;
    }
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}
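
// A rough sketch of the Blocker protocol this helper participates in (the
// block<BlockerType>() template itself lives in the header): the caller
// constructs the blocker, points m_blocker at it, sets the state to Blocked,
// and calls block_helper() to yield; after waking it clears m_blocker and
// reports whether the block completed or was interrupted by a signal.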

void Thread::block_helper()
{
    // This function mostly exists to avoid circular header dependencies. If
    // anything needs adding, think carefully about whether it belongs in
    // block() instead. Remember that we're unlocking here, so be very careful
    // about altering any state once we're unlocked!
    bool did_unlock = process().big_lock().unlock_if_locked();
    Scheduler::yield();
    if (did_unlock)
        process().big_lock().lock();
}
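
// Both sleep() and sleep_until() return the absolute wakeup time rather than
// the time actually slept. If we wake while g_uptime is still short of the
// deadline, the only way out of the SleepBlocker was a signal, hence the
// assertions below.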

u64 Thread::sleep(u32 ticks)
{
    ASSERT(state() == Thread::Running);
    u64 wakeup_time = g_uptime + ticks;
    auto ret = current->block<Thread::SleepBlocker>(wakeup_time);
    if (wakeup_time > g_uptime) {
        ASSERT(ret == Thread::BlockResult::InterruptedBySignal);
    }
    return wakeup_time;
}

u64 Thread::sleep_until(u64 wakeup_time)
{
    ASSERT(state() == Thread::Running);
    auto ret = current->block<Thread::SleepBlocker>(wakeup_time);
    if (wakeup_time > g_uptime)
        ASSERT(ret == Thread::BlockResult::InterruptedBySignal);
    return wakeup_time;
}

const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Skip1SchedulerPass:
        return "Skip1";
    case Thread::Skip0SchedulerPasses:
        return "Skip0";
    case Thread::Blocked:
        ASSERT(m_blocker != nullptr);
        return m_blocker->state_string();
    }
    kprintf("Thread::state_string(): Invalid state: %u\n", state());
    ASSERT_NOT_REACHED();
    return nullptr;
}

void Thread::finalize()
{
    ASSERT(current == g_finalizer);

    dbgprintf("Finalizing Thread %u in %s(%u)\n", tid(), m_process.name().characters(), pid());
    set_state(Thread::State::Dead);

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();

    if (this == &m_process.main_thread()) {
        m_process.finalize();
        return;
    }

    delete this;
}

void Thread::finalize_dying_threads()
{
    ASSERT(current == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        InterruptDisabler disabler;
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads)
        thread->finalize();
}
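
// Invoked from the timer tick path for the currently running thread. The
// return value reflects the thread's remaining time slice: once m_ticks_left
// reaches zero, the scheduler is expected to preempt us.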

bool Thread::tick()
{
    ++m_ticks;
    if (tss().cs & 3)
        ++m_process.m_ticks_in_user;
    else
        ++m_process.m_ticks_in_kernel;
    return --m_ticks_left;
}

void Thread::send_signal(u8 signal, Process* sender)
{
    ASSERT(signal < 32);
    InterruptDisabler disabler;

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbg() << "signal " << signal << " was ignored by " << process();
        return;
    }

    if (sender)
        dbgprintf("signal: %s(%u) sent %d to %s(%u)\n", sender->name().characters(), sender->pid(), signal, process().name().characters(), pid());
    else
        dbgprintf("signal: kernel sent %d to %s(%u)\n", signal, process().name().characters(), pid());

    m_pending_signals |= 1 << (signal - 1);
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    // FIXME: Because of a bug in dispatch_signal() we can't
    // set up a signal while we are the current thread. Because of
    // this we use a work-around where we send the signal and then
    // block, allowing the scheduler to properly dispatch the signal
    // before the thread is next run.
    send_signal(signal, &process());
    (void)block<SemiPermanentBlocker>(SemiPermanentBlocker::Reason::Signal);
}

bool Thread::has_unmasked_pending_signals() const
{
    return m_pending_signals & ~m_signal_mask;
}

ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 signal_candidates = m_pending_signals & ~m_signal_mask;
    ASSERT(signal_candidates);

    // Dispatch the lowest-numbered pending, unmasked signal.
    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}
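
// Signals are 1-based (there is no signal 0), so signal n is tracked at bit
// (n - 1) of the 32-bit pending and mask bitmaps used above.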

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGPWR:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}

bool Thread::should_ignore_signal(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
        return true;
    return false;
}

bool Thread::has_signal_handler(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}
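
// dispatch_signal() rigs the interrupted userspace frame so that the thread
// resumes in the signal trampoline. The frame it builds on the user stack,
// top down, is 14 dwords (the 56 bytes the alignment fix-up accounts for):
//
//   saved eflags, saved eip,
//   eax, ecx, edx, ebx, old esp, ebp, esi, edi,
//   old signal mask, signal number, handler address,
//   fake return address (0)
//
// The ring 3 trampoline is then expected to invoke the handler and unwind
// this frame on its way back to the interrupted code.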

ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(!process().is_ring0());

#ifdef SIGNAL_DEBUG
    kprintf("dispatch_signal %s(%u) <- %u\n", process().name().characters(), pid(), signal);
#endif

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));

    if (signal == SIGSTOP) {
        set_state(Stopped);
        return ShouldUnblockThread::No;
    }

    if (signal == SIGCONT && state() == Stopped)
        set_state(Runnable);

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            set_state(Stopped);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::DumpCore:
            process().for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process.terminate_due_to_signal(signal);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::Ignore:
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return ShouldUnblockThread::Yes;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        kprintf("%s(%u) ignored signal %u\n", process().name().characters(), pid(), signal);
#endif
        return ShouldUnblockThread::Yes;
    }

    ProcessPagingScope paging_scope(m_process);

    // The userspace registers should be stored at the top of the stack.
    // We have to subtract 2 because the processor decrements the kernel
    // stack before pushing the arguments.
    auto& regs = *(RegisterDump*)(kernel_stack_top() - sizeof(RegisterDump) - 2);

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;

    u32 old_esp = regs.esp_if_crossRing;
    u32 ret_eip = regs.eip;
    u32 ret_eflags = regs.eflags;

    // Align the stack to 16 bytes.
    // Note that we push 56 bytes (4 * 14) onto the stack,
    // so we need to account for this here.
    u32 stack_alignment = (regs.esp_if_crossRing - 56) % 16;
    regs.esp_if_crossRing -= stack_alignment;

    push_value_on_user_stack(regs, ret_eflags);
    push_value_on_user_stack(regs, ret_eip);
    push_value_on_user_stack(regs, regs.eax);
    push_value_on_user_stack(regs, regs.ecx);
    push_value_on_user_stack(regs, regs.edx);
    push_value_on_user_stack(regs, regs.ebx);
    push_value_on_user_stack(regs, old_esp);
    push_value_on_user_stack(regs, regs.ebp);
    push_value_on_user_stack(regs, regs.esi);
    push_value_on_user_stack(regs, regs.edi);

    // PUSH old_signal_mask
    push_value_on_user_stack(regs, old_signal_mask);

    push_value_on_user_stack(regs, signal);
    push_value_on_user_stack(regs, handler_vaddr.get());
    push_value_on_user_stack(regs, 0); // Push a fake return address.

    regs.eip = g_return_to_ring3_from_signal_trampoline.get();

    ASSERT((regs.esp_if_crossRing % 16) == 0);

    // If we're not blocking, we need to update the TSS so
    // that the far jump in the Scheduler goes to the proper location.
    // When we are blocking, we don't update the TSS, as we want to
    // resume at the blocker and descend the stack, cleaning up nicely.
    if (!in_kernel()) {
        Scheduler::prepare_to_modify_tss(*this);
        m_tss.cs = 0x1b;
        m_tss.ds = 0x23;
        m_tss.es = 0x23;
        m_tss.fs = 0x23;
        m_tss.gs = thread_specific_selector() | 3;
        m_tss.eip = regs.eip;
        m_tss.esp = regs.esp_if_crossRing;
        // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
        set_state(Skip1SchedulerPass);
    }

#ifdef SIGNAL_DEBUG
    kprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", process().name().characters(), pid(), state_string(), m_tss.cs, m_tss.eip);
#endif
    return ShouldUnblockThread::Yes;
}

void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress((u32)SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress((u32)SIG_IGN);
}
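
// Two flavors of the same push: the first writes through a RegisterDump's
// saved userspace esp (used while rigging a signal frame), the second writes
// through the TSS esp (used while setting up the stack of a thread that isn't
// running yet).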

void Thread::push_value_on_user_stack(RegisterDump& registers, u32 value)
{
    registers.esp_if_crossRing -= 4;
    u32* stack_ptr = (u32*)registers.esp_if_crossRing;
    *stack_ptr = value;
}

void Thread::push_value_on_stack(u32 value)
{
    m_tss.esp -= 4;
    u32* stack_ptr = (u32*)m_tss.esp;
    *stack_ptr = value;
}

void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    ASSERT(region);
    m_tss.esp = region->vaddr().offset(default_userspace_stack_size).get();

    char* stack_base = (char*)region->vaddr().get();
    int argc = arguments.size();
    char** argv = (char**)stack_base;
    char** env = argv + arguments.size() + 1;
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));

    for (int i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;

    for (int i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;

    // NOTE: The stack needs to be 16-byte aligned.
    push_value_on_stack((u32)env);
    push_value_on_stack((u32)argv);
    push_value_on_stack((u32)argc);
    push_value_on_stack(0);
}
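
// The main thread's initial stack, as built above (esp at the bottom):
//
//   (high addresses) env, argv, argc, fake return address (0) <- esp
//
// while the argv/envp pointer arrays and their string buffer are written at
// the low end of the stack region.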

void Thread::make_userspace_stack_for_secondary_thread(void* argument)
{
    m_userspace_stack_region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, String::format("Stack (Thread %d)", tid()), PROT_READ | PROT_WRITE, false);
    ASSERT(m_userspace_stack_region);
    m_tss.esp = m_userspace_stack_region->vaddr().offset(default_userspace_stack_size).get();

    // NOTE: The stack needs to be 16-byte aligned.
    push_value_on_stack((u32)argument);
    push_value_on_stack(0);
}

Thread* Thread::clone(Process& process)
{
    auto* clone = new Thread(process);
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_has_used_fpu = m_has_used_fpu;
    clone->m_thread_specific_data = m_thread_specific_data;
    return clone;
}

void Thread::initialize()
{
    Scheduler::initialize();
}

Vector<Thread*> Thread::all_threads()
{
    Vector<Thread*> threads;
    InterruptDisabler disabler;
    threads.ensure_capacity(thread_table().size());
    for (auto* thread : thread_table())
        threads.unchecked_append(thread);
    return threads;
}

bool Thread::is_thread(void* ptr)
{
    ASSERT_INTERRUPTS_DISABLED();
    return thread_table().contains((Thread*)ptr);
}

void Thread::set_state(State new_state)
{
    InterruptDisabler disabler;
    if (new_state == Blocked) {
        // We should always have a Blocker while blocked.
        ASSERT(m_blocker != nullptr);
    }
    m_state = new_state;
    if (m_process.pid() != 0) {
        Scheduler::update_state_for_thread(*this);
    }
}

String Thread::backtrace(ProcessInspectionHandle&) const
{
    return backtrace_impl();
}

String Thread::backtrace_impl() const
{
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);

    struct RecognizedSymbol {
        u32 address;
        const KSym* ksym;
    };

    StringBuilder builder;
    Vector<RecognizedSymbol, 64> recognized_symbols;
    recognized_symbols.append({ tss().eip, ksymbolicate(tss().eip) });

    // Walk the saved-EBP chain: each frame holds the caller's EBP at [ebp]
    // and the return address at [ebp + 4].
    for (u32* stack_ptr = (u32*)frame_ptr(); process.validate_read_from_kernel(VirtualAddress((u32)stack_ptr)); stack_ptr = (u32*)*stack_ptr) {
        u32 retaddr = stack_ptr[1];
        recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
    }

    for (auto& symbol : recognized_symbols) {
        if (!symbol.address)
            break;
        if (!symbol.ksym) {
            if (!Scheduler::is_active() && process.elf_loader() && process.elf_loader()->has_symbols())
                builder.appendf("%p  %s\n", symbol.address, process.elf_loader()->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
            continue;
        }
        unsigned offset = symbol.address - symbol.ksym->address;
        if (symbol.ksym->address == ksym_highest_address && offset > 4096)
            builder.appendf("%p\n", symbol.address);
        else
            builder.appendf("%p  %s +%u\n", symbol.address, symbol.ksym->name, offset);
    }
    return builder.to_string();
}
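
// Thread-specific region layout (i386 TLS, where the static TLS block sits
// immediately below the thread pointer):
//
//   region base -> copy of the process's master TLS image (aligned)
//   then        -> ThreadSpecificData, whose first member points at itself
//
// gs is based at m_thread_specific_data, so gs:0 is expected to yield the
// self pointer that userspace TLS access relies on.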

void Thread::make_thread_specific_region(Badge<Process>)
{
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    size_t thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)thread_specific_data - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress((u32)thread_specific_data);
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region->vaddr().as_ptr(), process().m_master_tls_size);
}

const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid() << ":" << value.tid() << ")";
}