#include "types.h"
#include "Task.h"
#include "kmalloc.h"
#include "VGA.h"
#include "StdLib.h"
#include "i386.h"
#include "system.h"
#include <VirtualFileSystem/FileHandle.h>
#include <VirtualFileSystem/VirtualFileSystem.h>
#include <ELFLoader/ExecSpace.h>
#include "MemoryManager.h"
#include "errno.h"
#include "i8253.h"
#include "RTC.h"
#include "ProcFileSystem.h"
#include <AK/StdLib.h>

//#define DEBUG_IO
//#define TASK_DEBUG
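
// NOTE: defaultStackSize below is used both for the kmalloc'ed Ring0 stacks and for the
// userland "stack" region allocated for Ring3 tasks in the Task constructor.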
static const DWORD defaultStackSize = 16384;

Task* current;
Task* s_kernelTask;

static pid_t next_pid;
static InlineLinkedList<Task>* s_tasks;
static InlineLinkedList<Task>* s_deadTasks;
static String* s_hostname;

static String& hostnameStorage(InterruptDisabler&)
{
    ASSERT(s_hostname);
    return *s_hostname;
}

static String getHostname()
{
    InterruptDisabler disabler;
    return hostnameStorage(disabler).isolatedCopy();
}

static bool contextSwitch(Task*);

static void redoKernelTaskTSS()
{
    if (!s_kernelTask->selector())
        s_kernelTask->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(s_kernelTask->selector());

    tssDescriptor.setBase(&s_kernelTask->tss());
    tssDescriptor.setLimit(0xffff);
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 9;

    flushGDT();
}

void Task::prepForIRETToNewTask()
{
    redoKernelTaskTSS();
    s_kernelTask->tss().backlink = current->selector();
    loadTaskRegister(s_kernelTask->selector());
}

void Task::initialize()
{
    current = nullptr;
    next_pid = 0;
    s_tasks = new InlineLinkedList<Task>;
    s_deadTasks = new InlineLinkedList<Task>;
    s_kernelTask = Task::createKernelTask(nullptr, "colonel");
    s_hostname = new String("birx");
    redoKernelTaskTSS();
    loadTaskRegister(s_kernelTask->selector());
}

#ifdef TASK_SANITY_CHECKS
void Task::checkSanity(const char* msg)
{
    char ch = current->name()[0];
    kprintf("<%p> %s{%u}%b [%d] :%b: sanity check <%s>\n",
        current->name().characters(),
        current->name().characters(),
        current->name().length(),
        current->name()[current->name().length() - 1],
        current->pid(), ch, msg ? msg : "");
    ASSERT((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'));
}
#endif

void Task::allocateLDT()
{
    ASSERT(!m_tss.ldt);
    static const WORD numLDTEntries = 4;
    WORD newLDTSelector = allocateGDTEntry();
    m_ldtEntries = new Descriptor[numLDTEntries];
#if 0
    kprintf("new ldt selector = %x\n", newLDTSelector);
    kprintf("new ldt table at = %p\n", m_ldtEntries);
    kprintf("new ldt table size = %u\n", (numLDTEntries * 8) - 1);
#endif
    Descriptor& ldt = getGDTEntry(newLDTSelector);
    ldt.setBase(m_ldtEntries);
    ldt.setLimit(numLDTEntries * 8 - 1);
    ldt.dpl = 0;
    ldt.segment_present = 1;
    ldt.granularity = 0;
    ldt.zero = 0;
    ldt.operation_size = 1;
    ldt.descriptor_type = 0;
    ldt.type = Descriptor::LDT;
    m_tss.ldt = newLDTSelector;
}

Vector<Task*> Task::allTasks()
{
    InterruptDisabler disabler;
    Vector<Task*> tasks;
    tasks.ensureCapacity(s_tasks->sizeSlow());
    for (auto* task = s_tasks->head(); task; task = task->next())
        tasks.append(task);
    return tasks;
}

Task::Region* Task::allocateRegion(size_t size, String&& name)
{
    // FIXME: This needs sanity checks. What if this overlaps existing regions?

    auto zone = MM.createZone(size);
    ASSERT(zone);
    m_regions.append(adopt(*new Region(m_nextRegion, size, move(zone), move(name))));
    m_nextRegion = m_nextRegion.offset(size).offset(16384);
    return m_regions.last().ptr();
}

bool Task::deallocateRegion(Region& region)
{
    InterruptDisabler disabler;
    for (size_t i = 0; i < m_regions.size(); ++i) {
        if (m_regions[i].ptr() == &region) {
            // FIXME: This seems racy.
            MM.unmapRegion(*this, region);
            m_regions.remove(i);
            return true;
        }
    }
    return false;
}

Task::Region* Task::regionFromRange(LinearAddress laddr, size_t size)
{
    for (auto& region : m_regions) {
        if (region->linearAddress == laddr && region->size == size)
            return region.ptr();
    }
    return nullptr;
}

void* Task::sys$mmap(void* addr, size_t size)
{
    // FIXME: Implement mapping at a client-preferred address.
    ASSERT(addr == nullptr);
    auto* region = allocateRegion(size, "mmap");
    if (!region)
        return (void*)-1;
    MM.mapRegion(*this, *region);
    return (void*)region->linearAddress.get();
}

int Task::sys$munmap(void* addr, size_t size)
{
    auto* region = regionFromRange(LinearAddress((dword)addr), size);
    if (!region)
        return -1;
    if (!deallocateRegion(*region))
        return -1;
    return 0;
}

#define VALIDATE_USER_BUFFER(b, s) \
    do { \
        LinearAddress laddr((dword)(b)); \
        if (!isValidAddressForUser(laddr) || !isValidAddressForUser(laddr.offset((s) - 1))) \
            return -EFAULT; \
    } while(0)
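// NOTE: VALIDATE_USER_BUFFER returns -EFAULT from the *enclosing* syscall unless both the
// first and last byte of the buffer fall within one of the calling task's regions or
// subregions (see isValidAddressForUser() at the bottom of this file.)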

int Task::sys$gethostname(char* buffer, size_t size)
{
    VALIDATE_USER_BUFFER(buffer, size);
    auto hostname = getHostname();
    if (size < (hostname.length() + 1))
        return -ENAMETOOLONG;
    memcpy(buffer, hostname.characters(), hostname.length() + 1);
    return 0;
}

int Task::sys$spawn(const char* path, const char** args)
{
    int error = 0;
    auto* child = Task::createUserTask(path, m_uid, m_gid, m_pid, error, args);
    if (child)
        return child->pid();
    return error;
}

Task* Task::createUserTask(const String& path, uid_t uid, gid_t gid, pid_t parentPID, int& error, const char** args)
{
    auto parts = path.split('/');
    if (parts.isEmpty()) {
        error = -ENOENT;
        return nullptr;
    }

    RetainPtr<VirtualFileSystem::Node> cwd;
    {
        InterruptDisabler disabler;
        if (auto* parentTask = Task::fromPID(parentPID))
            cwd = parentTask->m_cwd.copyRef();
    }

    auto handle = VirtualFileSystem::the().open(path, cwd.ptr());
    if (!handle) {
        error = -ENOENT; // FIXME: Get a more detailed error from VFS.
        return nullptr;
    }

    if (!handle->metadata().mayExecute(uid, gid)) {
        error = -EACCES;
        return nullptr;
    }

    auto elfData = handle->readEntireFile();
    if (!elfData) {
        error = -EIO; // FIXME: Get a more detailed error from VFS.
        return nullptr;
    }

    Vector<String> taskArguments;
    if (args) {
        for (size_t i = 0; args[i]; ++i) {
            taskArguments.append(args[i]);
        }
    } else {
        taskArguments.append(parts.last());
    }

    InterruptDisabler disabler; // FIXME: Get rid of this, jesus christ. This "critical" section is HUGE.
    Task* t = new Task(parts.takeLast(), uid, gid, parentPID, Ring3);

    t->m_arguments = move(taskArguments);

    ExecSpace space;
    Region* region = nullptr;
    space.hookableAlloc = [&] (const String& name, size_t size) {
        if (!size)
            return (void*)nullptr;
        size = ((size / 4096) + 1) * 4096;
        region = t->allocateRegion(size, String(name));
        ASSERT(region);
        MM.mapRegion(*t, *region);
        return (void*)region->linearAddress.asPtr();
    };
    bool success = space.loadELF(move(elfData));
    if (!success) {
        // FIXME: This is ugly. If we need to do this, it should be at a different level.
        MM.unmapRegionsForTask(*t);
        MM.mapRegionsForTask(*current);
        delete t;
        kprintf("Failure loading ELF %s\n", path.characters());
        error = -ENOEXEC;
        return nullptr;
    }

    space.forEachArea([&] (const String& name, dword offset, size_t size, LinearAddress laddr) {
        if (laddr.isNull())
            return;
        dword roundedOffset = offset & 0xfffff000;
        size_t roundedSize = 4096 * ceilDiv((offset - roundedOffset) + size, 4096u);
        LinearAddress roundedLaddr = laddr;
        roundedLaddr.mask(0xfffff000);
        t->m_subregions.append(make<Subregion>(*region, roundedOffset, roundedSize, roundedLaddr, String(name)));
#ifdef SUBREGION_DEBUG
        kprintf(" req subregion %s (offset: %u, size: %u) @ %p\n", name.characters(), offset, size, laddr.get());
        kprintf("actual subregion %s (offset: %u, size: %u) @ %p\n", name.characters(), roundedOffset, roundedSize, roundedLaddr.get());
#endif
        MM.mapSubregion(*t, *t->m_subregions.last());
    });

    t->m_tss.eip = (dword)space.symbolPtr("_start");
    if (!t->m_tss.eip) {
        // FIXME: This is ugly. If we need to do this, it should be at a different level.
        MM.unmapRegionsForTask(*t);
        MM.mapRegionsForTask(*current);
        delete t;
        error = -ENOEXEC;
        return nullptr;
    }

    // FIXME: This is ugly. If we need to do this, it should be at a different level.
    MM.unmapRegionsForTask(*t);
    MM.mapRegionsForTask(*current);

    s_tasks->prepend(t);
    system.nprocess++;
#ifdef TASK_DEBUG
    kprintf("Task %u (%s) spawned @ %p\n", t->pid(), t->name().characters(), t->m_tss.eip);
#endif

    error = 0;
    return t;
}

int Task::sys$get_arguments(int* argc, char*** argv)
{
    auto* region = allocateRegion(4096, "argv");
    if (!region)
        return -ENOMEM;
    MM.mapRegion(*this, *region);
    char* argpage = (char*)region->linearAddress.get();
    *argc = m_arguments.size();
    *argv = (char**)argpage;
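    // The argv pointer array lives at the start of the page; the argument strings are
    // packed (NUL-terminated) immediately after it.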
    char* bufptr = argpage + (sizeof(char*) * m_arguments.size());
    for (size_t i = 0; i < m_arguments.size(); ++i) {
        (*argv)[i] = bufptr;
        memcpy(bufptr, m_arguments[i].characters(), m_arguments[i].length());
        bufptr += m_arguments[i].length();
        *(bufptr++) = '\0';
    }
    return 0;
}

Task* Task::createKernelTask(void (*e)(), String&& name)
{
    Task* task = new Task(move(name), (uid_t)0, (gid_t)0, (pid_t)0, Ring0);
    task->m_tss.eip = (dword)e;

    if (task->pid() != 0) {
        InterruptDisabler disabler;
        s_tasks->prepend(task);
        system.nprocess++;
#ifdef TASK_DEBUG
        kprintf("Kernel task %u (%s) spawned @ %p\n", task->pid(), task->name().characters(), task->m_tss.eip);
#endif
    }

    return task;
}

Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring)
    : m_name(move(name))
    , m_pid(next_pid++)
    , m_uid(uid)
    , m_gid(gid)
    , m_state(Runnable)
    , m_ring(ring)
    , m_parentPID(parentPID)
{
    m_fileHandles.append(nullptr); // stdin
    m_fileHandles.append(nullptr); // stdout
    m_fileHandles.append(nullptr); // stderr

    auto* parentTask = Task::fromPID(parentPID);
    if (parentTask)
        m_cwd = parentTask->m_cwd.copyRef();
    else
        m_cwd = nullptr;

    m_nextRegion = LinearAddress(0x600000);

    memset(&m_tss, 0, sizeof(m_tss));

    if (isRing3()) {
        memset(&m_ldtEntries, 0, sizeof(m_ldtEntries));
        allocateLDT();
    }

    // Only IF is set when a task boots.
    m_tss.eflags = 0x0202;
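    // (Bit 1 of EFLAGS is a reserved, always-set bit; bit 9 is IF.)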

    word cs, ds, ss;

    if (isRing0()) {
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
    } else {
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
    }

    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = ds;
    m_tss.ss = ss;
    m_tss.cs = cs;

    m_tss.cr3 = MM.pageDirectoryBase().get();
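    // NOTE: Every task currently shares the same page directory; contextSwitch() switches
    // address spaces by unmapping the outgoing task's regions and mapping the incoming
    // task's, rather than by loading a per-task CR3.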

    if (isRing0()) {
        // FIXME: This memory is leaked.
        // But uh, there's also no kernel task termination, so I guess it's not technically leaked...
        dword stackBottom = (dword)kmalloc(defaultStackSize);
        m_stackTop0 = (stackBottom + defaultStackSize) & 0xfffffff8;
        m_tss.esp = m_stackTop0;
    } else {
        auto* region = allocateRegion(defaultStackSize, "stack");
        ASSERT(region);
        m_stackTop3 = region->linearAddress.offset(defaultStackSize).get() & 0xfffffff8;
        m_tss.esp = m_stackTop3;
    }

    if (isRing3()) {
        // Ring3 tasks need a separate stack for Ring0.
        m_kernelStack = kmalloc(defaultStackSize);
        m_stackTop0 = ((DWORD)m_kernelStack + defaultStackSize) & 0xfffffff8;
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_stackTop0;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_pid;
    m_farPtr.offset = 0x98765432;
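    // The offset half of the far pointer is not used by a far jump through a TSS selector,
    // so this is just a recognizable dummy value.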

    ProcFileSystem::the().addProcess(*this);
}

Task::~Task()
{
    InterruptDisabler disabler;
    ProcFileSystem::the().removeProcess(*this);
    system.nprocess--;
    delete [] m_ldtEntries;
    m_ldtEntries = nullptr;

    if (m_kernelStack) {
        kfree(m_kernelStack);
        m_kernelStack = nullptr;
    }
}

void Task::dumpRegions()
{
    kprintf("Task %s(%u) regions:\n", name().characters(), pid());
    kprintf("BEGIN END SIZE NAME\n");
    for (auto& region : m_regions) {
        kprintf("%x -- %x %x %s\n",
            region->linearAddress.get(),
            region->linearAddress.offset(region->size - 1).get(),
            region->size,
            region->name.characters());
    }

    kprintf("Task %s(%u) subregions:\n", name().characters(), pid());
    kprintf("REGION OFFSET BEGIN END SIZE NAME\n");
    for (auto& subregion : m_subregions) {
        kprintf("%x %x %x -- %x %x %s\n",
            subregion->region->linearAddress.get(),
            subregion->offset,
            subregion->linearAddress.get(),
            subregion->linearAddress.offset(subregion->size - 1).get(),
            subregion->size,
            subregion->name.characters());
    }
}

void Task::sys$exit(int status)
{
    cli();
#ifdef TASK_DEBUG
    kprintf("sys$exit: %s(%u) exit with status %d\n", name().characters(), pid(), status);
#endif

    setState(Exiting);

    MM.unmapRegionsForTask(*this);

    s_tasks->remove(this);

    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->waitee() == m_pid)
            task->m_waiteeStatus = status << 8;
    }

    if (!scheduleNewTask()) {
        kprintf("Task::sys$exit: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(this);

    switchNow();
}

void Task::taskDidCrash(Task* crashedTask)
{
    ASSERT_INTERRUPTS_DISABLED();

    crashedTask->setState(Crashing);
    crashedTask->dumpRegions();

    s_tasks->remove(crashedTask);

    MM.unmapRegionsForTask(*crashedTask);

    if (!scheduleNewTask()) {
        kprintf("Task::taskDidCrash: Failed to schedule a new task :(\n");
        HANG;
    }

    s_deadTasks->append(crashedTask);

    switchNow();
}

void Task::doHouseKeeping()
{
    InterruptDisabler disabler;
    if (s_deadTasks->isEmpty())
        return;
    Task* next = nullptr;
    for (auto* deadTask = s_deadTasks->head(); deadTask; deadTask = next) {
        next = deadTask->next();
        delete deadTask;
    }
    s_deadTasks->clear();
}

void yield()
{
    if (!current) {
        kprintf("PANIC: yield() with !current");
        HANG;
    }

    //kprintf("%s<%u> yield()\n", current->name().characters(), current->pid());

    InterruptDisabler disabler;
    if (!scheduleNewTask())
        return;

    //kprintf("yield() jumping to new task: %x (%s)\n", current->farPtr().selector, current->name().characters());
    switchNow();
}

void switchNow()
{
    Descriptor& descriptor = getGDTEntry(current->selector());
    descriptor.type = 9;
    flushGDT();
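    // Mark the incoming task's TSS descriptor "available" again (type 9) before jumping
    // to it; a far jump to a busy TSS would fault. The ljmp through the far pointer
    // (TSS selector) is what performs the hardware task switch.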
    asm("sti\n"
        "ljmp *(%%eax)\n"
        ::"a"(&current->farPtr())
    );
}

bool scheduleNewTask()
{
    ASSERT_INTERRUPTS_DISABLED();

    if (!current) {
        // XXX: The first ever context_switch() goes to the idle task.
        //      This is to set up a reliable place we can return to.
        return contextSwitch(Task::kernelTask());
    }

    // Check and unblock tasks whose wait conditions have been met.
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->state() == Task::BlockedSleep) {
            if (task->wakeupTime() <= system.uptime) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedWait) {
            if (!Task::fromPID(task->waitee())) {
                task->unblock();
                continue;
            }
        }

        if (task->state() == Task::BlockedRead) {
            ASSERT(task->m_fdBlockedOnRead != -1);
            if (task->m_fileHandles[task->m_fdBlockedOnRead]->hasDataAvailableForRead()) {
                task->unblock();
                continue;
            }
        }
    }

#if 0
    kprintf("Scheduler choices:\n");
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->state() == Task::BlockedWait || task->state() == Task::BlockedSleep)
            continue;
        kprintf("%w %s(%u)\n", task->state(), task->name().characters(), task->pid());
    }
#endif

    auto* prevHead = s_tasks->head();
    for (;;) {
        // Move head to tail.
        s_tasks->append(s_tasks->removeHead());
        auto* task = s_tasks->head();

        if (task->state() == Task::Runnable || task->state() == Task::Running) {
            //kprintf("switch to %s (%p vs %p)\n", task->name().characters(), task, current);
            return contextSwitch(task);
        }

        if (task == prevHead) {
            // Back at task_head, nothing wants to run.
            kprintf("Nothing wants to run!\n");
            kprintf("PID OWNER STATE NSCHED NAME\n");
            for (auto* task = s_tasks->head(); task; task = task->next()) {
                kprintf("%w %w:%w %b %w %s\n",
                    task->pid(),
                    task->uid(),
                    task->gid(),
                    task->state(),
                    task->timesScheduled(),
                    task->name().characters());
            }
            kprintf("Switch to kernel task\n");
            return contextSwitch(Task::kernelTask());
        }
    }
}

static bool contextSwitch(Task* t)
{
    //kprintf("c_s to %s (same:%u)\n", t->name().characters(), current == t);
    t->setTicksLeft(5);
    t->didSchedule();

    if (current == t)
        return false;

    // Some sanity checking to force a crash earlier.
    auto csRPL = t->tss().cs & 3;
    auto ssRPL = t->tss().ss & 3;

    if (csRPL != ssRPL) {
        kprintf("Fuckup! Switching from %s(%u) to %s(%u) has RPL mismatch\n",
            current->name().characters(), current->pid(),
            t->name().characters(), t->pid()
        );
        kprintf("code: %w:%x\n", t->tss().cs, t->tss().eip);
        kprintf(" stk: %w:%x\n", t->tss().ss, t->tss().esp);
        ASSERT(csRPL == ssRPL);
    }

    if (current) {
        // If the last task hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (current->state() == Task::Running)
            current->setState(Task::Runnable);

        bool success = MM.unmapRegionsForTask(*current);
        ASSERT(success);
    }

    bool success = MM.mapRegionsForTask(*t);
    ASSERT(success);

    current = t;
    t->setState(Task::Running);

    if (!t->selector())
        t->setSelector(allocateGDTEntry());

    auto& tssDescriptor = getGDTEntry(t->selector());

    tssDescriptor.limit_hi = 0;
    tssDescriptor.limit_lo = 0xFFFF;
    tssDescriptor.base_lo = (DWORD)(&t->tss()) & 0xFFFF;
    tssDescriptor.base_hi = ((DWORD)(&t->tss()) >> 16) & 0xFF;
    tssDescriptor.base_hi2 = ((DWORD)(&t->tss()) >> 24) & 0xFF;
    tssDescriptor.dpl = 0;
    tssDescriptor.segment_present = 1;
    tssDescriptor.granularity = 1;
    tssDescriptor.zero = 0;
    tssDescriptor.operation_size = 1;
    tssDescriptor.descriptor_type = 0;
    tssDescriptor.type = 11; // Busy TSS
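    // Type 11 is a "busy" 32-bit TSS, type 9 is "available"; switchNow() resets this
    // descriptor to type 9 right before the far jump, since the CPU refuses to jump
    // to a TSS that is already marked busy.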

    flushGDT();
    return true;
}

Task* Task::fromPID(pid_t pid)
{
    ASSERT_INTERRUPTS_DISABLED();
    for (auto* task = s_tasks->head(); task; task = task->next()) {
        if (task->pid() == pid)
            return task;
    }
    return nullptr;
}

FileHandle* Task::fileHandleIfExists(int fd)
{
    if (fd < 0)
        return nullptr;
    if ((unsigned)fd < m_fileHandles.size())
        return m_fileHandles[fd].ptr();
    return nullptr;
}

ssize_t Task::sys$get_dir_entries(int fd, void* buffer, size_t size)
{
    VALIDATE_USER_BUFFER(buffer, size);
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->get_dir_entries((byte*)buffer, size);
}

int Task::sys$seek(int fd, int offset)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    return handle->seek(offset, SEEK_SET);
}

ssize_t Task::sys$read(int fd, void* outbuf, size_t nread)
{
    VALIDATE_USER_BUFFER(outbuf, nread);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: called(%d, %p, %u)\n", fd, outbuf, nread);
#endif
    auto* handle = fileHandleIfExists(fd);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: handle=%p\n", handle);
#endif
    if (!handle) {
        kprintf("Task::sys$read: handle not found :(\n");
        return -1;
    }
#ifdef DEBUG_IO
    kprintf("call read on handle=%p\n", handle);
#endif
    if (handle->isBlocking()) {
        if (!handle->hasDataAvailableForRead()) {
            m_fdBlockedOnRead = fd;
            block(BlockedRead);
            yield();
        }
    }
    nread = handle->read((byte*)outbuf, nread);
#ifdef DEBUG_IO
    kprintf("Task::sys$read: nread=%u\n", nread);
#endif
    return nread;
}

int Task::sys$close(int fd)
{
    auto* handle = fileHandleIfExists(fd);
    if (!handle)
        return -1;
    // FIXME: Implement.
    return 0;
}

int Task::sys$lstat(const char* path, Unix::stat* statbuf)
{
    VALIDATE_USER_BUFFER(statbuf, sizeof(Unix::stat));
    auto handle = VirtualFileSystem::the().open(move(path), m_cwd.ptr());
    if (!handle)
        return -1;
    handle->stat(statbuf);
    return 0;
}

int Task::sys$chdir(const char* path)
{
    VALIDATE_USER_BUFFER(path, strlen(path));
    auto handle = VirtualFileSystem::the().open(path, m_cwd.ptr());
    if (!handle)
        return -ENOENT; // FIXME: More detailed error.
    if (!handle->isDirectory())
        return -ENOTDIR;
    m_cwd = handle->vnode();
    return 0;
}

int Task::sys$getcwd(char* buffer, size_t size)
{
    // FIXME: Implement!
    VALIDATE_USER_BUFFER(buffer, size);
    return -ENOTIMPL;
}

int Task::sys$open(const char* path, size_t pathLength)
{
#ifdef DEBUG_IO
    kprintf("Task::sys$open(): PID=%u, path=%s {%u}\n", m_pid, path, pathLength);
#endif
    VALIDATE_USER_BUFFER(path, pathLength);
    if (m_fileHandles.size() >= m_maxFileHandles)
        return -EMFILE;
    auto handle = VirtualFileSystem::the().open(String(path, pathLength), m_cwd.ptr());
    if (!handle)
        return -ENOENT; // FIXME: Detailed error.
    int fd = m_fileHandles.size();
    handle->setFD(fd);
    m_fileHandles.append(move(handle));
    return fd;
}

int Task::sys$uname(utsname* buf)
{
    VALIDATE_USER_BUFFER(buf, sizeof(utsname));
    strcpy(buf->sysname, "Serenity");
    strcpy(buf->release, "1.0-dev");
    strcpy(buf->version, "FIXME");
    strcpy(buf->machine, "i386");
    strcpy(buf->nodename, getHostname().characters());
    return 0;
}

int Task::sys$kill(pid_t pid, int sig)
{
    (void) sig;
    if (pid == 0) {
        // FIXME: Send to same-group processes.
        ASSERT(pid != 0);
    }
    if (pid == -1) {
        // FIXME: Send to all processes.
        ASSERT(pid != -1);
    }
    ASSERT_NOT_REACHED();
    Task* peer = Task::fromPID(pid);
    if (!peer) {
        // errno = ESRCH;
        return -1;
    }
    return -1;
}

int Task::sys$sleep(unsigned seconds)
{
    if (!seconds)
        return 0;
    sleep(seconds * TICKS_PER_SECOND);
    return 0;
}

int Task::sys$gettimeofday(timeval* tv)
{
    VALIDATE_USER_BUFFER(tv, sizeof(*tv));
    InterruptDisabler disabler;
    auto now = RTC::now();
    tv->tv_sec = now;
    tv->tv_usec = 0;
    return 0;
}

uid_t Task::sys$getuid()
{
    return m_uid;
}

gid_t Task::sys$getgid()
{
    return m_gid;
}

pid_t Task::sys$getpid()
{
    return m_pid;
}

pid_t Task::sys$waitpid(pid_t waitee, int* wstatus, int options)
{
    if (wstatus)
        VALIDATE_USER_BUFFER(wstatus, sizeof(int));

    InterruptDisabler disabler;
    if (!Task::fromPID(waitee))
        return -1;
    m_waitee = waitee;
    m_waiteeStatus = 0;
    block(BlockedWait);
    yield();
    if (wstatus)
        *wstatus = m_waiteeStatus;
    return m_waitee;
}

void Task::unblock()
{
    ASSERT(m_state != Task::Runnable && m_state != Task::Running);
    system.nblocked--;
    m_state = Task::Runnable;
}

void Task::block(Task::State state)
{
    ASSERT(current->state() == Task::Running);
    system.nblocked++;
    current->setState(state);
}

void block(Task::State state)
{
    current->block(state);
    yield();
}

void sleep(DWORD ticks)
{
    ASSERT(current->state() == Task::Running);
    current->setWakeupTime(system.uptime + ticks);
    current->block(Task::BlockedSleep);
    yield();
}

Task* Task::kernelTask()
{
    ASSERT(s_kernelTask);
    return s_kernelTask;
}

Task::Region::Region(LinearAddress a, size_t s, RetainPtr<Zone>&& z, String&& n)
    : linearAddress(a)
    , size(s)
    , zone(move(z))
    , name(move(n))
{
}

Task::Region::~Region()
{
}

Task::Subregion::Subregion(Region& r, dword o, size_t s, LinearAddress l, String&& n)
    : region(r)
    , offset(o)
    , size(s)
    , linearAddress(l)
    , name(move(n))
{
}

Task::Subregion::~Subregion()
{
}

bool Task::isValidAddressForKernel(LinearAddress laddr) const
{
    InterruptDisabler disabler;
    if (laddr.get() >= ksyms().first().address && laddr.get() <= ksyms().last().address)
        return true;
    if (is_kmalloc_address((void*)laddr.get()))
        return true;
    return isValidAddressForUser(laddr);
}

bool Task::isValidAddressForUser(LinearAddress laddr) const
{
    InterruptDisabler disabler;
    for (auto& region: m_regions) {
        if (laddr >= region->linearAddress && laddr < region->linearAddress.offset(region->size))
            return true;
    }
    for (auto& subregion: m_subregions) {
        if (laddr >= subregion->linearAddress && laddr < subregion->linearAddress.offset(subregion->size))
            return true;
    }
    return false;
}