#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <LibC/signal_numbers.h>

//#define SIGNAL_DEBUG

HashTable<Thread*>& thread_table()
{
    ASSERT_INTERRUPTS_DISABLED();
    static HashTable<Thread*>* table;
    if (!table)
        table = new HashTable<Thread*>;
    return *table;
}
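
// Threads live on one of two global lists depending on their state:
// g_runnable_threads holds threads the scheduler may pick, while
// g_nonrunnable_threads holds everything else (blocked, stopped, dying, ...).
// set_thread_list() below moves a thread between them on state changes.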
InlineLinkedList<Thread>* g_runnable_threads;
InlineLinkedList<Thread>* g_nonrunnable_threads;

static const u32 default_kernel_stack_size = 65536;
static const u32 default_userspace_stack_size = 65536;
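
// NOTE: The selector values loaded into the TSS below follow the x86 rule
// (GDT index << 3) | RPL: ring 0 threads use the kernel code/data selectors
// 0x08/0x10, while ring 3 threads use the userspace selectors 0x18/0x20
// with RPL=3, i.e. 0x1b/0x23.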
Thread::Thread(Process& process)
    : m_process(process)
    , m_tid(process.m_next_tid++)
{
    dbgprintf("Thread{%p}: New thread TID=%u in %s(%u)\n", this, m_tid, process.name().characters(), process.pid());
    set_default_signal_dispositions();
    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
    memset(&m_tss, 0, sizeof(m_tss));

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;
    u16 cs, ds, ss;

    if (m_process.is_ring0()) {
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
    } else {
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
    }

    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = ds;
    m_tss.ss = ss;
    m_tss.cs = cs;

    m_tss.cr3 = m_process.page_directory().cr3();

    if (m_process.is_ring0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel process termination, so I guess it's not technically leaked...
        m_kernel_stack_base = (u32)kmalloc_eternal(default_kernel_stack_size);
        m_tss.esp = (m_kernel_stack_base + default_kernel_stack_size) & 0xfffffff8u;
    } else {
        // Ring3 processes need a separate stack for Ring0.
        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid));
        m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_process.pid();
    m_far_ptr.offset = 0x98765432;

    if (m_process.pid() != 0) {
        InterruptDisabler disabler;
        thread_table().set(this);
        set_thread_list(g_nonrunnable_threads);
    }
}

Thread::~Thread()
{
    dbgprintf("~Thread{%p}\n", this);
    kfree_aligned(m_fpu_state);
    {
        InterruptDisabler disabler;
        if (m_thread_list)
            m_thread_list->remove(this);
        thread_table().remove(this);
    }

    if (g_last_fpu_thread == this)
        g_last_fpu_thread = nullptr;

    if (selector())
        gdt_free_entry(selector());
}

void Thread::unblock()
{
    m_blocker = nullptr;
    if (current == this) {
        set_state(Thread::Running);
        return;
    }
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}
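
// Blocking works by installing a Blocker on the thread and putting it into
// the Blocked state; the scheduler is then expected to poll the blocker's
// condition on every pass and unblock() the thread once it is satisfied.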
void Thread::block_until(const char* state_string, Function<bool()>&& condition)
{
    m_blocker = make<ConditionBlocker>(state_string, move(condition));
    block(Thread::Blocked);
    Scheduler::yield();
}

void Thread::block(Thread::State new_state)
{
    bool did_unlock = process().big_lock().unlock_if_locked();
    ASSERT(state() == Thread::Running);
    m_was_interrupted_while_blocked = false;
    set_state(new_state);
    Scheduler::yield();
    if (did_unlock)
        process().big_lock().lock();
}

void Thread::block(Blocker& blocker)
{
    m_blocker = &blocker;
    block(Thread::Blocked);
}
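
// Sleep is a regular block with a SleepBlocker that compares g_uptime
// (measured in timer ticks) against the computed wakeup time.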
u64 Thread::sleep(u32 ticks)
{
    ASSERT(state() == Thread::Running);
    u64 wakeup_time = g_uptime + ticks;
    current->block(*new Thread::SleepBlocker(wakeup_time));
    return wakeup_time;
}

const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Skip1SchedulerPass:
        return "Skip1";
    case Thread::Skip0SchedulerPasses:
        return "Skip0";
    case Thread::Blocked:
        ASSERT(m_blocker);
        return m_blocker->state_string();
    }
    kprintf("Thread::state_string(): Invalid state: %u\n", state());
    ASSERT_NOT_REACHED();
    return nullptr;
}

void Thread::finalize()
{
    dbgprintf("Finalizing Thread %u in %s(%u)\n", tid(), m_process.name().characters(), pid());
    set_state(Thread::State::Dead);
    m_blocker = nullptr;

    if (this == &m_process.main_thread())
        m_process.finalize();
}
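
// Dying threads are first collected into a local vector under an
// InterruptDisabler and only finalized afterwards, since finalize() may do
// work (such as tearing down the whole process) that shouldn't happen while
// interrupts are disabled.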
void Thread::finalize_dying_threads()
{
    Vector<Thread*, 32> dying_threads;
    {
        InterruptDisabler disabler;
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            dying_threads.append(&thread);
        });
    }
    for (auto* thread : dying_threads)
        thread->finalize();
}

bool Thread::tick()
{
    ++m_ticks;
    // The low two bits of CS hold the privilege level, so attribute the tick
    // to user or kernel time accordingly.
    if (tss().cs & 3)
        ++m_process.m_ticks_in_user;
    else
        ++m_process.m_ticks_in_kernel;
    return --m_ticks_left;
}

void Thread::send_signal(u8 signal, Process* sender)
{
    ASSERT(signal < 32);
    InterruptDisabler disabler;

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbg() << "signal " << signal << " was ignored by " << process();
        return;
    }

    if (sender)
        dbgprintf("signal: %s(%u) sent %d to %s(%u)\n", sender->name().characters(), sender->pid(), signal, process().name().characters(), pid());
    else
        dbgprintf("signal: kernel sent %d to %s(%u)\n", signal, process().name().characters(), pid());

    m_pending_signals |= 1 << signal;
}

bool Thread::has_unmasked_pending_signals() const
{
    return m_pending_signals & ~m_signal_mask;
}
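
// Pending signals are dispatched lowest-numbered first: pick the first set
// bit among the unmasked candidates and dispatch that signal.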
ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 signal_candidates = m_pending_signals & ~m_signal_mask;
    ASSERT(signal_candidates);

    u8 signal = 0;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << signal)) {
            break;
        }
    }
    return dispatch_signal(signal);
}

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGPWR:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}

bool Thread::should_ignore_signal(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
        return true;
    return false;
}
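
// dispatch_signal() redirects the thread to its signal handler by rewriting
// the TSS. When interrupting userspace code, it builds roughly this frame on
// the user stack (highest address first):
//
//     ret_eip, ret_eflags           <- where to resume after the handler
//     eax..edi in PUSHA order       <- saved general-purpose registers
//     12 bytes of alignment padding
//     old_signal_mask
//     signal number
//     trampoline return address     <- the process's signal trampoline
//
// The trampoline (set up by Process::create_signal_trampolines_if_needed())
// is expected to restore all of this state once the handler returns.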
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(signal < 32);

#ifdef SIGNAL_DEBUG
    kprintf("dispatch_signal %s(%u) <- %u\n", process().name().characters(), pid(), signal);
#endif

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << signal);

    if (signal == SIGSTOP) {
        set_state(Stopped);
        return ShouldUnblockThread::No;
    }

    if (signal == SIGCONT && state() == Stopped)
        set_state(Runnable);

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            set_state(Stopped);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::DumpCore:
        case DefaultSignalAction::Terminate:
            m_process.terminate_due_to_signal(signal);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::Ignore:
            // Ignored signals are filtered out earlier, in send_signal().
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return ShouldUnblockThread::Yes;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        kprintf("%s(%u) ignored signal %u\n", process().name().characters(), pid(), signal);
#endif
        return ShouldUnblockThread::Yes;
    }

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << signal);
    else
        new_signal_mask |= 1 << signal;

    m_signal_mask |= new_signal_mask;

    Scheduler::prepare_to_modify_tss(*this);

    u16 ret_cs = m_tss.cs;
    u32 ret_eip = m_tss.eip;
    u32 ret_eflags = m_tss.eflags;
    bool interrupting_in_kernel = (ret_cs & 3) == 0;

    ProcessPagingScope paging_scope(m_process);
    m_process.create_signal_trampolines_if_needed();

    if (interrupting_in_kernel) {
#ifdef SIGNAL_DEBUG
        kprintf("dispatch_signal to %s(%u) in state=%s with return to %w:%x\n", process().name().characters(), pid(), state_string(), ret_cs, ret_eip);
#endif
        ASSERT(is_blocked());
        m_tss_to_resume_kernel = make<TSS32>(m_tss);
#ifdef SIGNAL_DEBUG
        kprintf("resume tss pc: %w:%x stack: %w:%x flags: %x cr3: %x\n", m_tss_to_resume_kernel->cs, m_tss_to_resume_kernel->eip, m_tss_to_resume_kernel->ss, m_tss_to_resume_kernel->esp, m_tss_to_resume_kernel->eflags, m_tss_to_resume_kernel->cr3);
#endif

        if (!m_signal_stack_user_region) {
            m_signal_stack_user_region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, String::format("User Signal Stack (Thread %d)", m_tid));
            ASSERT(m_signal_stack_user_region);
        }
        if (!m_kernel_stack_for_signal_handler_region)
            m_kernel_stack_for_signal_handler_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Signal Stack (Thread %d)", m_tid));
        m_tss.ss = 0x23;
        m_tss.esp = m_signal_stack_user_region->vaddr().offset(default_userspace_stack_size).get();
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_kernel_stack_for_signal_handler_region->vaddr().offset(default_kernel_stack_size).get();

        push_value_on_stack(0);
    } else {
        push_value_on_stack(ret_eip);
        push_value_on_stack(ret_eflags);

        // PUSHA
        u32 old_esp = m_tss.esp;
        push_value_on_stack(m_tss.eax);
        push_value_on_stack(m_tss.ecx);
        push_value_on_stack(m_tss.edx);
        push_value_on_stack(m_tss.ebx);
        push_value_on_stack(old_esp);
        push_value_on_stack(m_tss.ebp);
        push_value_on_stack(m_tss.esi);
        push_value_on_stack(m_tss.edi);

        // Align the stack.
        m_tss.esp -= 12;
    }

    // PUSH old_signal_mask
    push_value_on_stack(old_signal_mask);

    m_tss.cs = 0x1b;
    m_tss.ds = 0x23;
    m_tss.es = 0x23;
    m_tss.fs = 0x23;
    m_tss.gs = 0x23;
    m_tss.eip = handler_vaddr.get();

    // FIXME: Should we worry about the stack being 16 byte aligned when entering a signal handler?
    push_value_on_stack(signal);

    if (interrupting_in_kernel)
        push_value_on_stack(m_process.m_return_to_ring0_from_signal_trampoline.get());
    else
        push_value_on_stack(m_process.m_return_to_ring3_from_signal_trampoline.get());

    ASSERT((m_tss.esp % 16) == 0);

    // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
    set_state(Skip1SchedulerPass);

#ifdef SIGNAL_DEBUG
    kprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", process().name().characters(), pid(), state_string(), m_tss.cs, m_tss.eip);
#endif
    return ShouldUnblockThread::Yes;
}

void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress((u32)SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress((u32)SIG_IGN);
}

void Thread::push_value_on_stack(u32 value)
{
    m_tss.esp -= 4;
    u32* stack_ptr = (u32*)m_tss.esp;
    *stack_ptr = value;
}
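
// The main thread's stack region starts with the argv[] and env[] pointer
// arrays, immediately followed by the NUL-terminated strings they point into:
//
//     [argv[0..argc]][env[0..n]][argument strings...][environment strings...]
//
// argc, argv and env are then pushed onto the stack itself for the benefit
// of the program's entry point.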
void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)");
    ASSERT(region);
    m_tss.esp = region->vaddr().offset(default_userspace_stack_size).get();

    char* stack_base = (char*)region->vaddr().get();
    int argc = arguments.size();
    char** argv = (char**)stack_base;
    char** env = argv + arguments.size() + 1;
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));

    size_t total_blob_size = 0;
    for (auto& a : arguments)
        total_blob_size += a.length() + 1;
    for (auto& e : environment)
        total_blob_size += e.length() + 1;

    size_t total_meta_size = sizeof(char*) * (arguments.size() + 1) + sizeof(char*) * (environment.size() + 1);

    // FIXME: It would be better if this didn't make us panic.
    ASSERT((total_blob_size + total_meta_size) < default_userspace_stack_size);

    for (int i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;

    for (int i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;

    // NOTE: The stack needs to be 16-byte aligned.
    push_value_on_stack((u32)env);
    push_value_on_stack((u32)argv);
    push_value_on_stack((u32)argc);
    push_value_on_stack(0);
}

void Thread::make_userspace_stack_for_secondary_thread(void* argument)
{
    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, String::format("Stack (Thread %d)", tid()));
    ASSERT(region);
    m_tss.esp = region->vaddr().offset(default_userspace_stack_size).get();

    // NOTE: The stack needs to be 16-byte aligned.
    push_value_on_stack((u32)argument);
    push_value_on_stack(0);
}

Thread* Thread::clone(Process& process)
{
    auto* clone = new Thread(process);
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    // Copy the FPU state into the buffer already allocated by the Thread
    // constructor instead of allocating a second one and leaking the first.
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_has_used_fpu = m_has_used_fpu;
    return clone;
}

KResult Thread::wait_for_connect(FileDescription& description)
{
    ASSERT(description.is_socket());
    auto& socket = *description.socket();
    if (socket.is_connected())
        return KSuccess;
    block(*new Thread::ConnectBlocker(description));
    Scheduler::yield();
    if (!socket.is_connected())
        return KResult(-ECONNREFUSED);
    return KSuccess;
}

void Thread::initialize()
{
    g_runnable_threads = new InlineLinkedList<Thread>;
    g_nonrunnable_threads = new InlineLinkedList<Thread>;
    Scheduler::initialize();
}

Vector<Thread*> Thread::all_threads()
{
    Vector<Thread*> threads;
    InterruptDisabler disabler;
    threads.ensure_capacity(thread_table().size());
    for (auto* thread : thread_table())
        threads.unchecked_append(thread);
    return threads;
}

bool Thread::is_thread(void* ptr)
{
    ASSERT_INTERRUPTS_DISABLED();
    return thread_table().contains((Thread*)ptr);
}

void Thread::set_thread_list(InlineLinkedList<Thread>* thread_list)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(pid() != 0);
    if (m_thread_list == thread_list)
        return;
    if (m_thread_list)
        m_thread_list->remove(this);
    if (thread_list)
        thread_list->append(this);
    m_thread_list = thread_list;
}

void Thread::set_state(State new_state)
{
    InterruptDisabler disabler;
    m_state = new_state;
    if (m_process.pid() != 0)
        set_thread_list(thread_list_for_state(new_state));
}