Revert "Kernel: Make sure threads which don't do any syscalls are terminated"

This reverts commit 3c3a1726df.

We cannot blindly kill threads just because they're not executing in a
system call. Being blocked (including in a page fault) needs proper
unblocking and potentially kernel stack cleanup before we can mark a
thread as Dying.

Fixes #8691
This commit is contained in:
Tom 2021-07-13 10:11:33 -06:00 committed by Andreas Kling
parent 552185066e
commit fa8fe40266
Notes: sideshowbarker 2024-07-18 09:07:56 +09:00
3 changed files with 0 additions and 19 deletions

View file

@@ -217,13 +217,6 @@ bool Scheduler::pick_next()
ScopedSpinLock lock(g_scheduler_lock);
auto current_thread = Thread::current();
if (current_thread->should_die() && current_thread->may_die_immediately()) {
// Ordinarily the thread would die on syscall exit, however if the thread
// doesn't perform any syscalls we still need to mark it for termination here.
current_thread->set_state(Thread::Dying);
}
if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
dump_thread_list();
}

View file

@@ -4,7 +4,6 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/ScopeGuard.h>
#include <Kernel/API/Syscall.h>
#include <Kernel/Arch/x86/Interrupts.h>
#include <Kernel/Arch/x86/TrapFrame.h>
@@ -154,14 +153,6 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
{
auto& regs = *trap->regs;
auto current_thread = Thread::current();
{
ScopedSpinLock lock(g_scheduler_lock);
current_thread->set_may_die_immediately(false);
}
ScopeGuard reset_may_die_immediately = [&current_thread] {
ScopedSpinLock lock(g_scheduler_lock);
current_thread->set_may_die_immediately(true);
};
VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
auto& process = current_thread->process();

View file

@@ -1187,8 +1187,6 @@ public:
bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
void set_profiling_suppressed() { m_is_profiling_suppressed = true; }
bool may_die_immediately() const { return m_may_die_immediately; }
void set_may_die_immediately(bool flag) { m_may_die_immediately = flag; }
InodeIndex global_procfs_inode_index() const { return m_global_procfs_inode_index; }
private:
@@ -1287,7 +1285,6 @@ private:
Kernel::Lock* m_blocking_lock { nullptr };
u32 m_lock_requested_count { 0 };
IntrusiveListNode<Thread> m_blocked_threads_list_node;
bool m_may_die_immediately { true };
#if LOCK_DEBUG
struct HoldingLockInfo {