Kernel: Clean up around Scheduler::yield() a bit.

Also add assertion in Lock that the scheduler isn't currently active.
I've been seeing occasional failures that I suspect might be caused by code
invoked by the scheduler trying to take a busy lock.
This commit is contained in:
Andreas Kling 2019-02-06 15:05:47 +01:00
parent 27263b6172
commit 8cc6e304ca
Notes: sideshowbarker 2024-07-19 15:51:03 +09:00
4 changed files with 23 additions and 28 deletions

View file

@@ -3,7 +3,7 @@
#include "Assertions.h"
#include "Types.h"
#include "i386.h"
int sched_yield();
#include <Kernel/Scheduler.h>
class Process;
extern Process* current;
@@ -52,6 +52,7 @@ private:
inline void Lock::lock()
{
ASSERT(!Scheduler::is_active());
for (;;) {
if (CAS(&m_lock, 1, 0) == 0) {
if (!m_holder || m_holder == current) {
@@ -63,7 +64,7 @@ inline void Lock::lock()
}
m_lock = 0;
}
sched_yield();
Scheduler::yield();
}
}
@@ -84,7 +85,7 @@ inline void Lock::unlock()
m_lock = 0;
return;
}
sched_yield();
Scheduler::yield();
}
}

View file

@@ -1109,7 +1109,7 @@ ssize_t Process::sys$read(int fd, void* outbuf, size_t nread)
if (!descriptor->can_read(*this)) {
m_blocked_fd = fd;
block(BlockedRead);
sched_yield();
Scheduler::yield();
if (m_was_interrupted_while_blocked)
return -EINTR;
}
@@ -1572,7 +1572,7 @@ pid_t Process::sys$waitpid(pid_t waitee, int* wstatus, int options)
m_waitee_pid = waitee;
block(BlockedWait);
sched_yield();
Scheduler::yield();
if (m_was_interrupted_while_blocked)
return -EINTR;
Process* waitee_process;
@@ -1612,7 +1612,7 @@ void Process::block(Process::State new_state)
void block(Process::State state)
{
current->block(state);
sched_yield();
Scheduler::yield();
}
void sleep(dword ticks)
@@ -1620,7 +1620,7 @@ void sleep(dword ticks)
ASSERT(current->state() == Process::Running);
current->set_wakeup_time(system.uptime + ticks);
current->block(Process::BlockedSleep);
sched_yield();
Scheduler::yield();
}
static bool is_inside_kernel_code(LinearAddress laddr)

View file

@@ -3,6 +3,7 @@
#include "system.h"
#include "RTC.h"
#include "i8253.h"
#include <AK/TemporaryChange.h>
//#define LOG_EVERY_CONTEXT_SWITCH
//#define SCHEDULER_DEBUG
@@ -12,17 +13,27 @@ static const dword time_slice = 5; // *10 = 50ms
Process* current;
Process* g_last_fpu_process;
static Process* s_colonel_process;
static volatile bool s_in_yield;
struct TaskRedirectionData {
word selector;
TSS32 tss;
};
static TaskRedirectionData s_redirection;
static bool s_active;
bool Scheduler::is_active()
{
return s_active;
}
bool Scheduler::pick_next()
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(!s_active);
TemporaryChange<bool> change(s_active, true);
ASSERT(s_active);
if (!current) {
// XXX: The first ever context_switch() goes to the idle process.
@@ -176,22 +187,12 @@ bool Scheduler::pick_next()
bool Scheduler::yield()
{
InterruptDisabler disabler;
ASSERT(!s_in_yield);
s_in_yield = true;
if (!current) {
kprintf("PANIC: sched_yield() with !current");
HANG;
}
ASSERT(current);
//dbgprintf("%s<%u> yield()\n", current->name().characters(), current->pid());
if (!pick_next()) {
s_in_yield = false;
if (!pick_next())
return 1;
}
s_in_yield = false;
//dbgprintf("yield() jumping to new process: %x (%s)\n", current->far_ptr().selector, current->name().characters());
switch_now();
return 0;
@@ -269,11 +270,6 @@ bool Scheduler::context_switch(Process& process)
return true;
}
int sched_yield()
{
return Scheduler::yield();
}
static void initialize_redirection()
{
auto& descriptor = get_gdt_entry(s_redirection.selector);
@@ -319,7 +315,6 @@ void Scheduler::initialize()
s_colonel_process = Process::create_kernel_process("colonel", nullptr);
current = nullptr;
g_last_fpu_process = nullptr;
s_in_yield = false;
load_task_register(s_redirection.selector);
}

View file

@@ -19,8 +19,7 @@ public:
static bool context_switch(Process&);
static void prepare_to_modify_tss(Process&);
static Process* colonel();
static bool is_active();
private:
static void prepare_for_iret_to_new_process();
};
int sched_yield();