2020-01-18 08:38:21 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright notice, this
|
|
|
|
* list of conditions and the following disclaimer.
|
|
|
|
*
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
|
|
* this list of conditions and the following disclaimer in the documentation
|
|
|
|
* and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
|
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
|
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
|
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
|
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
#pragma once
|
|
|
|
|
2020-05-31 18:56:17 +00:00
|
|
|
#include <AK/Checked.h>
|
2020-02-16 01:01:42 +00:00
|
|
|
#include <AK/HashMap.h>
|
2019-05-28 09:53:16 +00:00
|
|
|
#include <AK/InlineLinkedList.h>
|
2020-02-16 01:01:42 +00:00
|
|
|
#include <AK/NonnullOwnPtrVector.h>
|
2020-08-02 02:04:56 +00:00
|
|
|
#include <AK/NonnullRefPtrVector.h>
|
2019-09-27 12:19:07 +00:00
|
|
|
#include <AK/String.h>
|
2020-07-31 14:28:37 +00:00
|
|
|
#include <AK/Userspace.h>
|
2020-02-24 12:24:30 +00:00
|
|
|
#include <AK/WeakPtr.h>
|
2020-08-06 09:17:53 +00:00
|
|
|
#include <AK/Weakable.h>
|
2020-07-04 23:37:36 +00:00
|
|
|
#include <Kernel/API/Syscall.h>
|
2020-02-16 01:01:42 +00:00
|
|
|
#include <Kernel/FileSystem/InodeMetadata.h>
|
2020-02-16 00:50:16 +00:00
|
|
|
#include <Kernel/Forward.h>
|
2020-12-22 06:21:58 +00:00
|
|
|
#include <Kernel/FutexQueue.h>
|
2019-05-28 09:53:16 +00:00
|
|
|
#include <Kernel/Lock.h>
|
2020-08-15 19:13:19 +00:00
|
|
|
#include <Kernel/ProcessGroup.h>
|
2020-06-18 20:18:44 +00:00
|
|
|
#include <Kernel/StdLib.h>
|
2019-03-23 21:03:17 +00:00
|
|
|
#include <Kernel/Thread.h>
|
2020-12-09 04:18:45 +00:00
|
|
|
#include <Kernel/ThreadTracer.h>
|
2019-05-28 09:53:16 +00:00
|
|
|
#include <Kernel/UnixTypes.h>
|
2020-12-26 10:24:34 +00:00
|
|
|
#include <Kernel/UnveilNode.h>
|
2020-09-05 21:52:14 +00:00
|
|
|
#include <Kernel/VM/AllocationStrategy.h>
|
2019-05-28 09:53:16 +00:00
|
|
|
#include <Kernel/VM/RangeAllocator.h>
|
2019-05-26 00:08:51 +00:00
|
|
|
#include <LibC/signal_numbers.h>
|
2021-01-10 19:07:08 +00:00
|
|
|
#include <Libraries/LibELF/exec_elf.h>
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2019-06-06 15:46:41 +00:00
|
|
|
timeval kgettimeofday();
|
2019-03-13 12:13:23 +00:00
|
|
|
void kgettimeofday(timeval&);
|
|
|
|
|
2019-07-19 15:01:16 +00:00
|
|
|
extern VirtualAddress g_return_to_ring3_from_signal_trampoline;
|
|
|
|
|
2020-06-18 20:18:44 +00:00
|
|
|
// X-macro listing every promise understood by sys$pledge().
// Expand it by defining __ENUMERATE_PLEDGE_PROMISE(x) first; the Pledge
// enum below is generated from this list, so the order here is ABI-relevant.
#define ENUMERATE_PLEDGE_PROMISES         \
    __ENUMERATE_PLEDGE_PROMISE(stdio)     \
    __ENUMERATE_PLEDGE_PROMISE(rpath)     \
    __ENUMERATE_PLEDGE_PROMISE(wpath)     \
    __ENUMERATE_PLEDGE_PROMISE(cpath)     \
    __ENUMERATE_PLEDGE_PROMISE(dpath)     \
    __ENUMERATE_PLEDGE_PROMISE(inet)      \
    __ENUMERATE_PLEDGE_PROMISE(id)        \
    __ENUMERATE_PLEDGE_PROMISE(proc)      \
    __ENUMERATE_PLEDGE_PROMISE(ptrace)    \
    __ENUMERATE_PLEDGE_PROMISE(exec)      \
    __ENUMERATE_PLEDGE_PROMISE(unix)      \
    __ENUMERATE_PLEDGE_PROMISE(recvfd)    \
    __ENUMERATE_PLEDGE_PROMISE(sendfd)    \
    __ENUMERATE_PLEDGE_PROMISE(fattr)     \
    __ENUMERATE_PLEDGE_PROMISE(tty)       \
    __ENUMERATE_PLEDGE_PROMISE(chown)     \
    __ENUMERATE_PLEDGE_PROMISE(chroot)    \
    __ENUMERATE_PLEDGE_PROMISE(thread)    \
    __ENUMERATE_PLEDGE_PROMISE(video)     \
    __ENUMERATE_PLEDGE_PROMISE(accept)    \
    __ENUMERATE_PLEDGE_PROMISE(settime)   \
    __ENUMERATE_PLEDGE_PROMISE(sigaction) \
    __ENUMERATE_PLEDGE_PROMISE(setkeymap) \
    __ENUMERATE_PLEDGE_PROMISE(prot_exec)
|
2020-01-11 19:48:43 +00:00
|
|
|
|
|
|
|
// One enumerator per pledge promise, in the exact order of
// ENUMERATE_PLEDGE_PROMISES (the ordinal is used as a bit index).
enum class Pledge : u32 {
#define __ENUMERATE_PLEDGE_PROMISE(x) x,
    ENUMERATE_PLEDGE_PROMISES
#undef __ENUMERATE_PLEDGE_PROMISE
};
|
|
|
|
|
2020-01-21 18:28:29 +00:00
|
|
|
// Lifecycle of a process's unveil() state.
enum class VeilState {
    // unveil() has never been called; the whole filesystem is visible.
    None,
    // At least one path has been unveiled; only unveiled paths are visible.
    Dropped,
    // unveil(nullptr, nullptr) was called; no further paths may be unveiled.
    Locked,
};
|
|
|
|
|
2020-12-22 06:21:58 +00:00
|
|
|
// Maps a userspace futex address (FlatPtr) to the queue of threads waiting
// on it. Modernized from `typedef` to a `using` alias declaration.
using FutexQueues = HashMap<FlatPtr, RefPtr<FutexQueue>>;
|
|
|
|
|
2020-08-06 09:17:53 +00:00
|
|
|
class Process
|
|
|
|
: public RefCounted<Process>
|
|
|
|
, public InlineLinkedListNode<Process>
|
|
|
|
, public Weakable<Process> {
|
|
|
|
|
2020-04-22 09:54:58 +00:00
|
|
|
AK_MAKE_NONCOPYABLE(Process);
|
|
|
|
AK_MAKE_NONMOVABLE(Process);
|
|
|
|
|
2018-11-01 12:15:46 +00:00
|
|
|
friend class InlineLinkedListNode<Process>;
|
2019-03-23 21:03:17 +00:00
|
|
|
friend class Thread;
|
2020-11-06 08:09:51 +00:00
|
|
|
friend class CoreDump;
|
2019-05-28 09:53:16 +00:00
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
public:
|
2020-06-28 21:34:31 +00:00
|
|
|
// Returns the process of the thread currently running on this CPU, or
// nullptr early during boot before any thread exists.
inline static Process* current()
{
    auto current_thread = Processor::current_thread();
    // Fixed mojibake: "&current_thread" had been corrupted to
    // "¤t_thread" ("&curren;" HTML-entity damage). process() returns a
    // reference, so we take its address to produce the nullable pointer.
    return current_thread ? &current_thread->process() : nullptr;
}
|
2020-02-17 14:04:27 +00:00
|
|
|
|
2020-11-17 03:51:34 +00:00
|
|
|
template<typename EntryFunction>
|
2020-09-05 21:52:14 +00:00
|
|
|
static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT)
|
2020-11-17 03:51:34 +00:00
|
|
|
{
|
|
|
|
auto* entry_func = new EntryFunction(move(entry));
|
2020-11-29 23:05:27 +00:00
|
|
|
return create_kernel_process(
|
|
|
|
first_thread, move(name), [](void* data) {
|
|
|
|
EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
|
|
|
|
(*func)();
|
|
|
|
delete func;
|
|
|
|
},
|
|
|
|
entry_func, affinity);
|
2020-11-17 03:51:34 +00:00
|
|
|
}
|
|
|
|
|
2020-09-05 21:52:14 +00:00
|
|
|
static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT);
|
2020-09-27 14:53:35 +00:00
|
|
|
static RefPtr<Process> create_user_process(RefPtr<Thread>& first_thread, const String& path, uid_t, gid_t, ProcessID ppid, int& error, Vector<String>&& arguments = Vector<String>(), Vector<String>&& environment = Vector<String>(), TTY* = nullptr);
|
2018-11-01 12:15:46 +00:00
|
|
|
~Process();
|
2018-10-23 10:44:46 +00:00
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
static Vector<ProcessID> all_pids();
|
2020-08-02 02:04:56 +00:00
|
|
|
static AK::NonnullRefPtrVector<Process> all_processes();
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-11-17 03:51:34 +00:00
|
|
|
template<typename EntryFunction>
|
|
|
|
RefPtr<Thread> create_kernel_thread(EntryFunction entry, u32 priority, const String& name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true)
|
|
|
|
{
|
|
|
|
auto* entry_func = new EntryFunction(move(entry));
|
|
|
|
return create_kernel_thread([](void* data) {
|
|
|
|
EntryFunction* func = reinterpret_cast<EntryFunction*>(data);
|
|
|
|
(*func)();
|
|
|
|
delete func;
|
2020-11-29 23:05:27 +00:00
|
|
|
},
|
|
|
|
priority, name, affinity, joinable);
|
2020-11-17 03:51:34 +00:00
|
|
|
}
|
|
|
|
RefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, const String& name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
|
2020-06-28 21:34:31 +00:00
|
|
|
|
2019-12-11 19:36:56 +00:00
|
|
|
bool is_profiling() const { return m_profiling; }
|
|
|
|
void set_profiling(bool profiling) { m_profiling = profiling; }
|
2020-11-06 08:09:51 +00:00
|
|
|
bool should_core_dump() const { return m_should_dump_core; }
|
|
|
|
void set_dump_core(bool dump_core) { m_should_dump_core = dump_core; }
|
2019-12-11 19:36:56 +00:00
|
|
|
|
2020-12-18 13:10:10 +00:00
|
|
|
OwnPtr<KBuffer> backtrace() const;
|
2019-07-25 19:02:19 +00:00
|
|
|
|
2019-03-24 00:20:35 +00:00
|
|
|
bool is_dead() const { return m_dead; }
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2021-01-03 23:58:50 +00:00
|
|
|
bool is_stopped() const { return m_is_stopped; }
|
|
|
|
bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped); }
|
2020-12-09 04:18:45 +00:00
|
|
|
|
2020-09-10 15:46:24 +00:00
|
|
|
bool is_kernel_process() const { return m_is_kernel_process; }
|
|
|
|
bool is_user_process() const { return !m_is_kernel_process; }
|
2018-11-07 20:19:47 +00:00
|
|
|
|
2018-11-09 00:25:31 +00:00
|
|
|
PageDirectory& page_directory() { return *m_page_directory; }
|
|
|
|
const PageDirectory& page_directory() const { return *m_page_directory; }
|
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
static RefPtr<Process> from_pid(ProcessID);
|
2020-08-08 20:04:20 +00:00
|
|
|
static SessionID get_sid_from_pgid(ProcessGroupID pgid);
|
2018-10-16 09:01:38 +00:00
|
|
|
|
|
|
|
const String& name() const { return m_name; }
|
2020-08-08 15:32:34 +00:00
|
|
|
ProcessID pid() const { return m_pid; }
|
2020-08-08 20:04:20 +00:00
|
|
|
SessionID sid() const { return m_sid; }
|
|
|
|
bool is_session_leader() const { return m_sid.value() == m_pid.value(); }
|
2020-08-15 19:13:19 +00:00
|
|
|
ProcessGroupID pgid() const { return m_pg ? m_pg->pgid() : 0; }
|
|
|
|
bool is_group_leader() const { return pgid().value() == m_pid.value(); }
|
2020-09-07 09:53:54 +00:00
|
|
|
Span<const gid_t> extra_gids() const { return m_extra_gids; }
|
2018-11-05 14:04:19 +00:00
|
|
|
uid_t euid() const { return m_euid; }
|
|
|
|
gid_t egid() const { return m_egid; }
|
2020-06-17 12:58:00 +00:00
|
|
|
uid_t uid() const { return m_uid; }
|
|
|
|
gid_t gid() const { return m_gid; }
|
|
|
|
uid_t suid() const { return m_suid; }
|
|
|
|
gid_t sgid() const { return m_sgid; }
|
2020-08-08 15:32:34 +00:00
|
|
|
ProcessID ppid() const { return m_ppid; }
|
2018-10-25 08:15:13 +00:00
|
|
|
|
2020-12-25 17:27:42 +00:00
|
|
|
bool is_dumpable() const { return m_dumpable; }
|
|
|
|
void set_dumpable(bool dumpable) { m_dumpable = dumpable; }
|
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
ThreadID exec_tid() const { return m_exec_tid; }
|
Kernel: Allow process with multiple threads to call exec and exit
This allows a process which has more than 1 thread to call exec, even
from a thread. This kills all the other threads, but it won't wait for
them to finish, just makes sure that they are not in a running/runable
state.
In the case where a thread does exec, the new program PID will be the
thread TID, to keep the PID == TID in the new process.
This introduces a new function inside the Process class,
kill_threads_except_self which is called on exit() too (exit with
multiple threads wasn't properly working either).
Inside the Lock class, there is the need for a new function,
clear_waiters, which removes all the waiters from the
Process::big_lock. This is needed since after a exit/exec, there should
be no other threads waiting for this lock, the threads should be simply
killed. Only queued threads should wait for this lock at this point,
since blocked threads are handled in set_should_die.
2020-02-18 12:28:28 +00:00
|
|
|
|
2019-02-22 01:39:13 +00:00
|
|
|
mode_t umask() const { return m_umask; }
|
|
|
|
|
2019-02-27 11:32:53 +00:00
|
|
|
bool in_group(gid_t) const;
|
|
|
|
|
2020-01-07 14:53:42 +00:00
|
|
|
RefPtr<FileDescription> file_description(int fd) const;
|
2019-09-28 20:00:38 +00:00
|
|
|
int fd_flags(int fd) const;
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2019-05-28 09:53:16 +00:00
|
|
|
template<typename Callback>
|
|
|
|
static void for_each(Callback);
|
|
|
|
template<typename Callback>
|
2020-08-08 20:04:20 +00:00
|
|
|
static void for_each_in_pgrp(ProcessGroupID, Callback);
|
2019-05-28 09:53:16 +00:00
|
|
|
template<typename Callback>
|
|
|
|
void for_each_child(Callback);
|
|
|
|
template<typename Callback>
|
2021-01-23 06:24:33 +00:00
|
|
|
IterationDecision for_each_thread(Callback) const;
|
2018-11-02 13:06:48 +00:00
|
|
|
|
2021-01-28 07:41:18 +00:00
|
|
|
template<typename Callback>
|
|
|
|
IterationDecision for_each_thread_in_coredump(Callback) const;
|
|
|
|
|
2019-01-30 17:26:19 +00:00
|
|
|
void die();
|
2020-12-09 04:18:45 +00:00
|
|
|
void finalize();
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2020-08-02 02:04:56 +00:00
|
|
|
ALWAYS_INLINE SpinLock<u32>& get_lock() const { return m_lock; }
|
|
|
|
|
2020-12-09 04:18:45 +00:00
|
|
|
ThreadTracer* tracer() { return m_tracer.ptr(); }
|
|
|
|
bool is_traced() const { return !!m_tracer; }
|
|
|
|
void start_tracing_from(ProcessID tracer);
|
|
|
|
void stop_tracing();
|
|
|
|
void tracer_trap(Thread&, const RegisterState&);
|
|
|
|
|
2019-11-09 21:18:16 +00:00
|
|
|
int sys$yield();
|
|
|
|
int sys$sync();
|
|
|
|
int sys$beep();
|
2020-08-01 09:37:40 +00:00
|
|
|
int sys$get_process_name(Userspace<char*> buffer, size_t buffer_size);
|
|
|
|
int sys$set_process_name(Userspace<const char*> user_name, size_t user_name_length);
|
|
|
|
int sys$watch_file(Userspace<const char*> path, size_t path_length);
|
2019-07-21 17:45:31 +00:00
|
|
|
int sys$dbgputch(u8);
|
2020-08-01 09:37:40 +00:00
|
|
|
int sys$dbgputstr(Userspace<const u8*>, int length);
|
2019-07-21 07:59:17 +00:00
|
|
|
int sys$dump_backtrace();
|
2020-08-08 23:08:24 +00:00
|
|
|
pid_t sys$gettid();
|
|
|
|
int sys$donate(pid_t tid);
|
2019-04-08 23:10:00 +00:00
|
|
|
int sys$ftruncate(int fd, off_t);
|
2018-11-02 11:56:51 +00:00
|
|
|
pid_t sys$setsid();
|
|
|
|
pid_t sys$getsid(pid_t);
|
|
|
|
int sys$setpgid(pid_t pid, pid_t pgid);
|
|
|
|
pid_t sys$getpgrp();
|
|
|
|
pid_t sys$getpgid(pid_t);
|
2018-10-16 09:01:38 +00:00
|
|
|
uid_t sys$getuid();
|
2018-10-22 11:55:11 +00:00
|
|
|
gid_t sys$getgid();
|
2018-11-05 14:04:19 +00:00
|
|
|
uid_t sys$geteuid();
|
|
|
|
gid_t sys$getegid();
|
2018-10-22 11:55:11 +00:00
|
|
|
pid_t sys$getpid();
|
2018-11-06 12:33:06 +00:00
|
|
|
pid_t sys$getppid();
|
2020-08-09 19:34:08 +00:00
|
|
|
int sys$getresuid(Userspace<uid_t*>, Userspace<uid_t*>, Userspace<uid_t*>);
|
2020-08-09 19:37:47 +00:00
|
|
|
int sys$getresgid(Userspace<gid_t*>, Userspace<gid_t*>, Userspace<gid_t*>);
|
2018-11-06 12:40:23 +00:00
|
|
|
mode_t sys$umask(mode_t);
|
2020-08-05 05:16:59 +00:00
|
|
|
int sys$open(Userspace<const Syscall::SC_open_params*>);
|
2018-10-16 09:01:38 +00:00
|
|
|
int sys$close(int fd);
|
2020-07-31 14:28:37 +00:00
|
|
|
ssize_t sys$read(int fd, Userspace<u8*>, ssize_t);
|
2019-07-03 19:17:35 +00:00
|
|
|
ssize_t sys$write(int fd, const u8*, ssize_t);
|
2020-09-15 18:18:44 +00:00
|
|
|
ssize_t sys$writev(int fd, Userspace<const struct iovec*> iov, int iov_count);
|
2020-08-09 21:57:50 +00:00
|
|
|
int sys$fstat(int fd, Userspace<stat*>);
|
2020-07-31 14:28:37 +00:00
|
|
|
int sys$stat(Userspace<const Syscall::SC_stat_params*>);
|
2018-10-31 16:50:43 +00:00
|
|
|
int sys$lseek(int fd, off_t, int whence);
|
2020-08-08 15:32:34 +00:00
|
|
|
int sys$kill(pid_t pid_or_pgid, int sig);
|
2019-02-15 11:30:48 +00:00
|
|
|
[[noreturn]] void sys$exit(int status);
|
2020-02-15 23:15:37 +00:00
|
|
|
int sys$sigreturn(RegisterState& registers);
|
2020-08-05 05:29:28 +00:00
|
|
|
pid_t sys$waitid(Userspace<const Syscall::SC_waitid_params*>);
|
2020-08-05 05:30:40 +00:00
|
|
|
void* sys$mmap(Userspace<const Syscall::SC_mmap_params*>);
|
2020-12-29 01:11:47 +00:00
|
|
|
void* sys$mremap(Userspace<const Syscall::SC_mremap_params*>);
|
2018-10-24 07:48:24 +00:00
|
|
|
int sys$munmap(void*, size_t size);
|
2020-08-05 05:38:00 +00:00
|
|
|
int sys$set_mmap_name(Userspace<const Syscall::SC_set_mmap_name_params*>);
|
2019-08-12 17:33:24 +00:00
|
|
|
int sys$mprotect(void*, size_t, int prot);
|
2019-12-09 18:12:38 +00:00
|
|
|
int sys$madvise(void*, size_t, int advice);
|
2019-12-29 12:16:53 +00:00
|
|
|
int sys$purge(int mode);
|
2019-01-15 22:12:20 +00:00
|
|
|
int sys$select(const Syscall::SC_select_params*);
|
2020-08-05 06:15:42 +00:00
|
|
|
int sys$poll(Userspace<const Syscall::SC_poll_params*>);
|
2019-02-25 20:19:57 +00:00
|
|
|
ssize_t sys$get_dir_entries(int fd, void*, ssize_t);
|
2021-01-16 14:48:56 +00:00
|
|
|
int sys$getcwd(Userspace<char*>, size_t);
|
2020-07-31 14:34:47 +00:00
|
|
|
int sys$chdir(Userspace<const char*>, size_t);
|
2019-09-11 23:18:25 +00:00
|
|
|
int sys$fchdir(int fd);
|
2018-10-25 11:53:49 +00:00
|
|
|
int sys$sleep(unsigned seconds);
|
2019-02-03 15:11:28 +00:00
|
|
|
int sys$usleep(useconds_t usec);
|
Kernel+LibC: Add adjtime(2)
Most systems (Linux, OpenBSD) adjust 0.5 ms per second, or 0.5 us per
1 ms tick. That is, the clock is sped up or slowed down by at most
0.05%. This means adjusting the clock by 1 s takes 2000 s, and the
clock can be adjusted by at most 1.8 s per hour.
FreeBSD adjusts 5 ms per second if the remaining time adjustment is
>= 1 s (0.5%) , else it adjusts by 0.5 ms as well. This allows adjusting
by (almost) 18 s per hour.
Since Serenity OS can lose more than 22 s per hour (#3429), this
picks an adjustment rate up to 1% for now. This allows us to
adjust up to 36s per hour, which should be sufficient to adjust
the clock fast enough to keep up with how much time the clock
currently loses. Once we have a fancier NTP implementation that can
adjust tick rate in addition to offset, we can think about reducing
this.
adjtime is a bit old-school and most current POSIX-y OSs instead
implement adjtimex/ntp_adjtime, but a) we have to start somewhere
b) ntp_adjtime() is a fairly gnarly API. OpenBSD's adjfreq looks
like it might provide similar functionality with a nicer API. But
before worrying about all this, it's probably a good idea to get
to a place where the kernel APIs are (barely) good enough so that
we can write an ntp service, and once we have that we should write
a way to automatically evaluate how well it keeps the time adjusted,
and only then should we add improvements to the adjustment mechanism.
2020-11-05 21:00:51 +00:00
|
|
|
int sys$adjtime(Userspace<const timeval*>, Userspace<timeval*>);
|
2020-08-05 05:18:51 +00:00
|
|
|
int sys$gettimeofday(Userspace<timeval*>);
|
2020-08-09 19:42:51 +00:00
|
|
|
int sys$clock_gettime(clockid_t, Userspace<timespec*>);
|
2020-08-09 19:48:49 +00:00
|
|
|
int sys$clock_settime(clockid_t, Userspace<const timespec*>);
|
2020-08-05 05:24:45 +00:00
|
|
|
int sys$clock_nanosleep(Userspace<const Syscall::SC_clock_nanosleep_params*>);
|
2020-08-09 19:51:41 +00:00
|
|
|
int sys$gethostname(Userspace<char*>, ssize_t);
|
2020-08-09 19:53:12 +00:00
|
|
|
int sys$sethostname(Userspace<const char*>, ssize_t);
|
2020-08-09 20:02:27 +00:00
|
|
|
int sys$uname(Userspace<utsname*>);
|
2020-08-05 05:32:19 +00:00
|
|
|
int sys$readlink(Userspace<const Syscall::SC_readlink_params*>);
|
2020-08-04 14:27:52 +00:00
|
|
|
int sys$ttyname(int fd, Userspace<char*>, size_t);
|
|
|
|
int sys$ptsname(int fd, Userspace<char*>, size_t);
|
2020-02-15 23:15:37 +00:00
|
|
|
pid_t sys$fork(RegisterState&);
|
2020-08-09 19:11:13 +00:00
|
|
|
int sys$execve(Userspace<const Syscall::SC_execve_params*>);
|
2020-08-15 08:54:00 +00:00
|
|
|
int sys$dup2(int old_fd, int new_fd);
|
2019-01-23 05:53:01 +00:00
|
|
|
int sys$sigaction(int signum, const sigaction* act, sigaction* old_act);
|
2020-08-09 21:58:31 +00:00
|
|
|
int sys$sigprocmask(int how, Userspace<const sigset_t*> set, Userspace<sigset_t*> old_set);
|
2020-08-09 21:58:50 +00:00
|
|
|
int sys$sigpending(Userspace<sigset_t*>);
|
2020-08-09 19:20:04 +00:00
|
|
|
int sys$getgroups(ssize_t, Userspace<gid_t*>);
|
2020-08-09 19:17:12 +00:00
|
|
|
int sys$setgroups(ssize_t, Userspace<const gid_t*>);
|
2019-08-05 12:29:05 +00:00
|
|
|
int sys$pipe(int pipefd[2], int flags);
|
2020-08-08 15:32:34 +00:00
|
|
|
int sys$killpg(pid_t pgrp, int sig);
|
2020-06-17 12:58:00 +00:00
|
|
|
int sys$seteuid(uid_t);
|
|
|
|
int sys$setegid(gid_t);
|
2018-11-10 23:20:53 +00:00
|
|
|
int sys$setuid(uid_t);
|
2020-06-17 12:58:00 +00:00
|
|
|
int sys$setgid(gid_t);
|
|
|
|
int sys$setresuid(uid_t, uid_t, uid_t);
|
|
|
|
int sys$setresgid(gid_t, gid_t, gid_t);
|
2018-11-10 23:20:53 +00:00
|
|
|
unsigned sys$alarm(unsigned seconds);
|
2020-08-01 17:38:31 +00:00
|
|
|
int sys$access(Userspace<const char*> pathname, size_t path_length, int mode);
|
2019-07-03 19:17:35 +00:00
|
|
|
int sys$fcntl(int fd, int cmd, u32 extra_arg);
|
2020-05-23 11:17:58 +00:00
|
|
|
int sys$ioctl(int fd, unsigned request, FlatPtr arg);
|
2020-08-01 17:38:31 +00:00
|
|
|
int sys$mkdir(Userspace<const char*> pathname, size_t path_length, mode_t mode);
|
2020-08-09 19:26:56 +00:00
|
|
|
clock_t sys$times(Userspace<tms*>);
|
2020-07-31 14:38:47 +00:00
|
|
|
int sys$utime(Userspace<const char*> pathname, size_t path_length, Userspace<const struct utimbuf*>);
|
2020-08-03 16:40:28 +00:00
|
|
|
int sys$link(Userspace<const Syscall::SC_link_params*>);
|
2020-08-09 22:02:04 +00:00
|
|
|
int sys$unlink(Userspace<const char*> pathname, size_t path_length);
|
2020-08-03 16:40:28 +00:00
|
|
|
int sys$symlink(Userspace<const Syscall::SC_symlink_params*>);
|
2020-08-01 17:38:31 +00:00
|
|
|
int sys$rmdir(Userspace<const char*> pathname, size_t path_length);
|
2020-08-07 03:49:16 +00:00
|
|
|
int sys$mount(Userspace<const Syscall::SC_mount_params*>);
|
2020-08-09 22:08:07 +00:00
|
|
|
int sys$umount(Userspace<const char*> mountpoint, size_t mountpoint_length);
|
2020-08-09 22:09:10 +00:00
|
|
|
int sys$chmod(Userspace<const char*> pathname, size_t path_length, mode_t);
|
2019-03-01 09:39:19 +00:00
|
|
|
int sys$fchmod(int fd, mode_t);
|
2020-08-07 03:52:24 +00:00
|
|
|
int sys$chown(Userspace<const Syscall::SC_chown_params*>);
|
2019-06-01 18:31:36 +00:00
|
|
|
int sys$fchown(int fd, uid_t, gid_t);
|
2019-02-14 13:17:38 +00:00
|
|
|
int sys$socket(int domain, int type, int protocol);
|
2020-08-09 22:13:43 +00:00
|
|
|
int sys$bind(int sockfd, Userspace<const sockaddr*> addr, socklen_t);
|
2019-02-14 13:17:38 +00:00
|
|
|
int sys$listen(int sockfd, int backlog);
|
2020-08-09 22:23:13 +00:00
|
|
|
int sys$accept(int sockfd, Userspace<sockaddr*>, Userspace<socklen_t*>);
|
2020-08-09 22:25:53 +00:00
|
|
|
int sys$connect(int sockfd, Userspace<const sockaddr*>, socklen_t);
|
2020-02-07 23:52:33 +00:00
|
|
|
int sys$shutdown(int sockfd, int how);
|
2020-09-16 15:45:00 +00:00
|
|
|
ssize_t sys$sendmsg(int sockfd, Userspace<const struct msghdr*>, int flags);
|
|
|
|
ssize_t sys$recvmsg(int sockfd, Userspace<struct msghdr*>, int flags);
|
2020-08-07 09:29:05 +00:00
|
|
|
int sys$getsockopt(Userspace<const Syscall::SC_getsockopt_params*>);
|
2020-08-07 07:18:20 +00:00
|
|
|
int sys$setsockopt(Userspace<const Syscall::SC_setsockopt_params*>);
|
2020-08-07 07:03:37 +00:00
|
|
|
int sys$getsockname(Userspace<const Syscall::SC_getsockname_params*>);
|
2020-08-07 07:00:17 +00:00
|
|
|
int sys$getpeername(Userspace<const Syscall::SC_getpeername_params*>);
|
2020-08-02 01:06:29 +00:00
|
|
|
int sys$sched_setparam(pid_t pid, Userspace<const struct sched_param*>);
|
2020-08-02 00:55:28 +00:00
|
|
|
int sys$sched_getparam(pid_t pid, Userspace<struct sched_param*>);
|
2020-08-05 07:11:56 +00:00
|
|
|
int sys$create_thread(void* (*)(void*), Userspace<const Syscall::SC_create_thread_params*>);
|
2020-08-09 22:45:51 +00:00
|
|
|
void sys$exit_thread(Userspace<void*>);
|
2020-08-09 22:45:39 +00:00
|
|
|
int sys$join_thread(pid_t tid, Userspace<void**> exit_value);
|
2020-08-08 23:08:24 +00:00
|
|
|
int sys$detach_thread(pid_t tid);
|
2020-08-09 22:27:23 +00:00
|
|
|
int sys$set_thread_name(pid_t tid, Userspace<const char*> buffer, size_t buffer_size);
|
2020-08-09 22:29:55 +00:00
|
|
|
int sys$get_thread_name(pid_t tid, Userspace<char*> buffer, size_t buffer_size);
|
2020-08-05 07:05:08 +00:00
|
|
|
int sys$rename(Userspace<const Syscall::SC_rename_params*>);
|
2020-08-05 07:02:19 +00:00
|
|
|
int sys$mknod(Userspace<const Syscall::SC_mknod_params*>);
|
2019-07-19 11:08:26 +00:00
|
|
|
int sys$halt();
|
2019-07-19 07:58:12 +00:00
|
|
|
int sys$reboot();
|
2020-08-07 09:37:39 +00:00
|
|
|
int sys$realpath(Userspace<const Syscall::SC_realpath_params*>);
|
2020-08-09 22:35:06 +00:00
|
|
|
ssize_t sys$getrandom(Userspace<void*>, size_t, unsigned int);
|
2021-01-30 20:35:54 +00:00
|
|
|
int sys$getkeymap(Userspace<const Syscall::SC_getkeymap_params*>);
|
2020-08-01 23:34:01 +00:00
|
|
|
int sys$setkeymap(Userspace<const Syscall::SC_setkeymap_params*>);
|
2020-08-09 22:36:41 +00:00
|
|
|
int sys$module_load(Userspace<const char*> path, size_t path_length);
|
2020-08-09 22:37:45 +00:00
|
|
|
int sys$module_unload(Userspace<const char*> name, size_t name_length);
|
2019-12-11 19:36:56 +00:00
|
|
|
int sys$profiling_enable(pid_t);
|
|
|
|
int sys$profiling_disable(pid_t);
|
2020-08-05 06:53:23 +00:00
|
|
|
int sys$futex(Userspace<const Syscall::SC_futex_params*>);
|
2020-08-09 22:39:05 +00:00
|
|
|
int sys$chroot(Userspace<const char*> path, size_t path_length, int mount_flags);
|
2020-08-01 23:17:45 +00:00
|
|
|
int sys$pledge(Userspace<const Syscall::SC_pledge_params*>);
|
2020-08-01 23:12:09 +00:00
|
|
|
int sys$unveil(Userspace<const Syscall::SC_unveil_params*>);
|
2020-03-08 09:36:51 +00:00
|
|
|
int sys$perf_event(int type, FlatPtr arg1, FlatPtr arg2);
|
2020-03-16 18:06:33 +00:00
|
|
|
int sys$get_stack_bounds(FlatPtr* stack_base, size_t* stack_size);
|
2020-08-01 22:25:19 +00:00
|
|
|
int sys$ptrace(Userspace<const Syscall::SC_ptrace_params*>);
|
2020-06-24 20:57:37 +00:00
|
|
|
int sys$sendfd(int sockfd, int fd);
|
|
|
|
int sys$recvfd(int sockfd);
|
2020-07-14 20:41:59 +00:00
|
|
|
long sys$sysconf(int name);
|
2020-08-08 15:32:34 +00:00
|
|
|
int sys$disown(ProcessID);
|
2020-10-10 09:17:07 +00:00
|
|
|
void* sys$allocate_tls(size_t);
|
2020-12-25 17:27:42 +00:00
|
|
|
int sys$prctl(int option, FlatPtr arg1, FlatPtr arg2);
|
2020-12-30 14:19:57 +00:00
|
|
|
int sys$set_coredump_metadata(Userspace<const Syscall::SC_set_coredump_metadata_params*>);
|
2021-01-04 20:26:32 +00:00
|
|
|
void sys$abort();
|
2021-01-15 10:28:07 +00:00
|
|
|
int sys$anon_create(size_t, int options);
|
2019-02-16 11:13:43 +00:00
|
|
|
|
2020-01-27 20:11:36 +00:00
|
|
|
template<bool sockname, typename Params>
|
|
|
|
int get_sock_or_peer_name(const Params&);
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
static void initialize();
|
|
|
|
|
2020-05-06 19:11:38 +00:00
|
|
|
[[noreturn]] void crash(int signal, u32 eip, bool out_of_memory = false);
|
2020-11-29 23:05:27 +00:00
|
|
|
static void reap(Process&);
|
|
|
|
[[nodiscard]] siginfo_t wait_info();
|
2018-10-17 22:26:30 +00:00
|
|
|
|
2018-10-30 14:33:37 +00:00
|
|
|
const TTY* tty() const { return m_tty; }
|
2020-02-16 01:01:42 +00:00
|
|
|
void set_tty(TTY*);
|
2018-10-30 14:33:37 +00:00
|
|
|
|
2019-01-31 16:31:23 +00:00
|
|
|
size_t region_count() const { return m_regions.size(); }
|
2020-08-02 02:04:56 +00:00
|
|
|
// Read-only view of this process's memory regions.
// Precondition: m_lock must already be held by the caller, since other
// threads of the process may mutate the region list concurrently.
const NonnullOwnPtrVector<Region>& regions() const
{
    ASSERT(m_lock.is_locked());
    return m_regions;
}
|
2019-01-31 16:31:23 +00:00
|
|
|
void dump_regions();
|
2018-10-18 12:53:00 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
u32 m_ticks_in_user { 0 };
|
|
|
|
u32 m_ticks_in_kernel { 0 };
|
2018-12-03 00:12:26 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
u32 m_ticks_in_user_for_dead_children { 0 };
|
|
|
|
u32 m_ticks_in_kernel_for_dead_children { 0 };
|
2018-12-03 00:12:26 +00:00
|
|
|
|
2019-05-30 18:23:50 +00:00
|
|
|
Custody& current_directory();
|
2020-12-26 23:54:13 +00:00
|
|
|
Custody* executable() { return m_executable.ptr(); }
|
|
|
|
const Custody* executable() const { return m_executable.ptr(); }
|
2018-10-28 11:20:25 +00:00
|
|
|
|
2021-01-15 19:21:03 +00:00
|
|
|
// Argument vector the process was exec'd with. (Dropped the extraneous
// trailing ';' after the member function definition.)
const Vector<String>& arguments() const { return m_arguments; }
|
|
|
|
// Environment vector the process was exec'd with. (Dropped the extraneous
// trailing ';' after the member function definition.)
const Vector<String>& environment() const { return m_environment; }
|
|
|
|
|
2019-03-06 21:30:13 +00:00
|
|
|
int number_of_open_file_descriptors() const;
|
2020-07-30 21:38:15 +00:00
|
|
|
int max_open_file_descriptors() const
|
|
|
|
{
|
|
|
|
return m_max_open_file_descriptors;
|
|
|
|
}
|
2018-11-01 12:39:28 +00:00
|
|
|
|
2019-12-29 11:45:58 +00:00
|
|
|
size_t amount_clean_inode() const;
|
2019-12-29 11:28:32 +00:00
|
|
|
size_t amount_dirty_private() const;
|
2019-02-03 17:53:18 +00:00
|
|
|
size_t amount_virtual() const;
|
|
|
|
size_t amount_resident() const;
|
|
|
|
size_t amount_shared() const;
|
2019-12-09 18:12:38 +00:00
|
|
|
size_t amount_purgeable_volatile() const;
|
|
|
|
size_t amount_purgeable_nonvolatile() const;
|
2019-02-03 17:53:18 +00:00
|
|
|
|
Kernel: Tighten up exec/do_exec and allow for PT_INTERP interpreters
This patch changes how exec() figures out which program image to
actually load. Previously, we opened the path to our main executable in
find_shebang_interpreter_for_executable, read the first page (or less,
if the file was smaller) and then decided whether to recurse with the
interpreter instead. We then then re-opened the main executable in
do_exec.
However, since we now want to parse the ELF header and Program Headers
of an elf image before even doing any memory region work, we can change
the way this whole process works. We open the file and read (up to) the
first page in exec() itself, then pass just the page and the amount read
to find_shebang_interpreter_for_executable. Since we now have that page
and the FileDescription for the main executable handy, we can do a few
things. First, validate the ELF header and ELF program headers for any
shenanigans. ELF32 Little Endian i386 only, please. Second, we can grab
the PT_INTERP interpreter from any ET_DYN files, and open that guy right
away if it exists. Finally, we can pass the main executable's and
optionally the PT_INTERP interpreter's file descriptions down to do_exec
and not have to feel guilty about opening the file twice.
In do_exec, we now have a choice. Are we going to load the main
executable, or the interpreter? We could load both, but it'll be way
easier for the initial pass on the RTLD if we only load the interpreter.
Then it can load the main executable itself like any old shared object,
just, the one with main in it :). Later on we can load both of them
into memory and the RTLD can relocate itself before trying to do
anything. The way it's written now the RTLD will get dibs on its
requested virtual addresses being the actual virtual addresses.
2020-01-11 01:28:02 +00:00
|
|
|
// Replaces the current process image with the program at `path`.
// `recursion_depth` bounds nested shebang/PT_INTERP interpreter resolution
// so a self-referencing interpreter chain cannot recurse forever.
// (Fixes parameter-name typo: "recusion_depth" -> "recursion_depth".)
int exec(String path, Vector<String> arguments, Vector<String> environment, int recursion_depth = 0);
|
2018-11-02 19:41:58 +00:00
|
|
|
|
2020-10-10 09:13:21 +00:00
|
|
|
struct LoadResult {
|
2020-10-17 11:39:36 +00:00
|
|
|
FlatPtr load_base { 0 };
|
2020-10-10 09:13:21 +00:00
|
|
|
FlatPtr entry_eip { 0 };
|
|
|
|
size_t size { 0 };
|
|
|
|
FlatPtr program_headers { 0 };
|
|
|
|
size_t num_program_headers { 0 };
|
2020-12-25 15:20:26 +00:00
|
|
|
WeakPtr<Region> tls_region;
|
2020-10-10 09:13:21 +00:00
|
|
|
size_t tls_size { 0 };
|
|
|
|
size_t tls_alignment { 0 };
|
2020-12-25 15:20:26 +00:00
|
|
|
WeakPtr<Region> stack_region;
|
2020-10-10 09:13:21 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
// Whether load_elf_object() should allocate a TLS master region for the object.
enum class ShouldAllocateTls {
    No = 0,
    Yes,
};
|
|
|
|
|
2021-01-10 19:07:08 +00:00
|
|
|
KResultOr<LoadResult> load(NonnullRefPtr<FileDescription> main_program_description, RefPtr<FileDescription> interpreter_description, const Elf32_Ehdr& main_program_header);
|
2020-10-10 09:13:21 +00:00
|
|
|
KResultOr<LoadResult> load_elf_object(FileDescription& object_description, FlatPtr load_offset, ShouldAllocateTls);
|
2021-01-10 19:10:45 +00:00
|
|
|
KResultOr<FlatPtr> get_interpreter_load_offset(const Elf32_Ehdr& main_program_header, FileDescription& main_program_description, FileDescription& interpreter_description);
|
2020-10-10 09:13:21 +00:00
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
bool is_superuser() const
|
|
|
|
{
|
|
|
|
return m_euid == 0;
|
|
|
|
}
|
2018-11-07 00:38:51 +00:00
|
|
|
|
2021-01-15 16:27:52 +00:00
|
|
|
KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool shared);
|
|
|
|
KResultOr<Region*> allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
|
2019-02-16 11:13:43 +00:00
|
|
|
bool deallocate_region(Region& region);
|
2019-02-16 08:57:42 +00:00
|
|
|
|
2019-12-19 18:13:44 +00:00
|
|
|
Region& allocate_split_region(const Region& source_region, const Range&, size_t offset_in_vmobject);
|
2019-12-30 20:11:25 +00:00
|
|
|
Vector<Region*, 2> split_region_around_range(const Region& source_region, const Range&);
|
2019-08-29 18:57:02 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
void terminate_due_to_signal(u8 signal);
|
2020-05-16 10:33:48 +00:00
|
|
|
KResult send_signal(u8 signal, Process* sender);
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2021-01-03 20:46:38 +00:00
|
|
|
u8 termination_signal() const { return m_termination_signal; }
|
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
// Number of threads in this process. Relaxed atomic load, so the value may
// be momentarily stale under concurrent thread creation/exit.
// NOTE(review): m_thread_count is Atomic<u32> but this returns u16 —
// truncates if the count ever exceeds 65535.
u16 thread_count() const
{
    return m_thread_count.load(AK::MemoryOrder::memory_order_relaxed);
}
|
2019-12-22 10:35:02 +00:00
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
// Accessor for the per-process "big lock" (m_big_lock).
Lock& big_lock() { return m_big_lock; }
|
2019-04-01 18:02:05 +00:00
|
|
|
|
2020-01-10 22:14:04 +00:00
|
|
|
Custody& root_directory();
|
2020-01-12 18:42:01 +00:00
|
|
|
Custody& root_directory_relative_to_global_root();
|
2020-01-10 22:14:04 +00:00
|
|
|
void set_root_directory(const Custody&);
|
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
bool has_promises() const
|
|
|
|
{
|
2021-01-25 21:42:36 +00:00
|
|
|
return m_has_promises;
|
2020-07-30 21:38:15 +00:00
|
|
|
}
|
|
|
|
// True if `pledge` is set in the current promise bitmask.
bool has_promised(Pledge pledge) const
{
    return m_promises & (1u << (u32)pledge);
}
|
2020-01-11 19:48:43 +00:00
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
// Current unveil() veil state (defaults to VeilState::None).
VeilState veil_state() const { return m_veil_state; }
|
2020-12-26 10:24:34 +00:00
|
|
|
// Read-only access to the root of this process's unveil() path tree.
const UnveilNode& unveiled_paths() const { return m_unveiled_paths; }
|
Kernel: Add a basic implementation of unveil()
This syscall is a complement to pledge() and adds the same sort of
incremental relinquishing of capabilities for filesystem access.
The first call to unveil() will "drop a veil" on the process, and from
now on, only unveiled parts of the filesystem are visible to it.
Each call to unveil() specifies a path to either a directory or a file
along with permissions for that path. The permissions are a combination
of the following:
- r: Read access (like the "rpath" promise)
- w: Write access (like the "wpath" promise)
- x: Execute access
- c: Create/remove access (like the "cpath" promise)
Attempts to open a path that has not been unveiled will fail with
ENOENT. If the unveiled path lacks sufficient permissions, it will fail
with EACCES.
Like pledge(), subsequent calls to unveil() with the same path can only
remove permissions, not add them.
Once you call unveil(nullptr, nullptr), the veil is locked, and it's no
longer possible to unveil any more paths for the process, ever.
This concept comes from OpenBSD, and their implementation does various
things differently, I'm sure. This is just a first implementation for
SerenityOS, and we'll keep improving on it as we go. :^)
2020-01-20 21:12:04 +00:00
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
bool wait_for_tracer_at_next_execve() const
|
|
|
|
{
|
|
|
|
return m_wait_for_tracer_at_next_execve;
|
|
|
|
}
|
|
|
|
void set_wait_for_tracer_at_next_execve(bool val)
|
|
|
|
{
|
|
|
|
m_wait_for_tracer_at_next_execve = val;
|
|
|
|
}
|
2020-04-07 15:23:37 +00:00
|
|
|
|
2020-08-01 22:25:19 +00:00
|
|
|
KResultOr<u32> peek_user_data(Userspace<const u32*> address);
|
|
|
|
KResult poke_user_data(Userspace<u32*> address, u32 data);
|
2020-04-07 15:23:37 +00:00
|
|
|
|
2020-12-09 02:04:05 +00:00
|
|
|
void disowned_by_waiter(Process& process);
|
2020-12-09 04:18:45 +00:00
|
|
|
void unblock_waiters(Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
|
2020-11-29 23:05:27 +00:00
|
|
|
Thread::WaitBlockCondition& wait_block_condition() { return m_wait_block_condition; }
|
|
|
|
|
2021-01-20 19:10:13 +00:00
|
|
|
HashMap<String, String>& coredump_metadata() { return m_coredump_metadata; }
|
2020-12-30 14:19:57 +00:00
|
|
|
const HashMap<String, String>& coredump_metadata() const { return m_coredump_metadata; }
|
|
|
|
|
2021-01-28 07:41:18 +00:00
|
|
|
const NonnullRefPtrVector<Thread>& threads_for_coredump(Badge<CoreDump>) const { return m_threads_for_coredump; }
|
|
|
|
|
2021-01-11 08:52:18 +00:00
|
|
|
PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
|
|
|
|
|
2018-10-16 09:01:38 +00:00
|
|
|
private:
|
2018-10-18 11:05:00 +00:00
|
|
|
friend class MemoryManager;
|
2018-11-07 21:15:02 +00:00
|
|
|
friend class Scheduler;
|
2018-11-08 13:35:30 +00:00
|
|
|
friend class Region;
|
2018-10-18 11:05:00 +00:00
|
|
|
|
2021-01-23 06:24:33 +00:00
|
|
|
bool add_thread(Thread&);
|
|
|
|
bool remove_thread(Thread&);
|
|
|
|
|
2021-01-11 08:52:18 +00:00
|
|
|
PerformanceEventBuffer& ensure_perf_events();
|
|
|
|
|
2020-09-27 14:53:35 +00:00
|
|
|
Process(RefPtr<Thread>& first_thread, const String& name, uid_t, gid_t, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
|
2020-08-08 15:32:34 +00:00
|
|
|
static ProcessID allocate_pid();
|
2018-10-25 09:15:17 +00:00
|
|
|
|
2021-01-27 20:01:45 +00:00
|
|
|
Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
|
2019-05-17 02:39:22 +00:00
|
|
|
|
2020-01-19 15:25:38 +00:00
|
|
|
Region& add_region(NonnullOwnPtr<Region>);
|
|
|
|
|
Kernel: Allow process with multiple threads to call exec and exit
This allows a process which has more than 1 thread to call exec, even
from a thread. This kills all the other threads, but it won't wait for
them to finish, just makes sure that they are not in a running/runnable
state.
In the case where a thread does exec, the new program PID will be the
thread TID, to keep the PID == TID in the new process.
This introduces a new function inside the Process class,
kill_threads_except_self which is called on exit() too (exit with
multiple threads wasn't properly working either).
Inside the Lock class, there is the need for a new function,
clear_waiters, which removes all the waiters from the
Process::big_lock. This is needed since after a exit/exec, there should
be no other threads waiting for this lock, the threads should be simply
killed. Only queued threads should wait for this lock at this point,
since blocked threads are handled in set_should_die.
2020-02-18 12:28:28 +00:00
|
|
|
void kill_threads_except_self();
|
|
|
|
void kill_all_threads();
|
2021-01-11 17:53:45 +00:00
|
|
|
bool dump_core();
|
|
|
|
bool dump_perfcore();
|
Kernel: Allow process with multiple threads to call exec and exit
This allows a process which has more than 1 thread to call exec, even
from a thread. This kills all the other threads, but it won't wait for
them to finish, just makes sure that they are not in a running/runnable
state.
In the case where a thread does exec, the new program PID will be the
thread TID, to keep the PID == TID in the new process.
This introduces a new function inside the Process class,
kill_threads_except_self which is called on exit() too (exit with
multiple threads wasn't properly working either).
Inside the Lock class, there is the need for a new function,
clear_waiters, which removes all the waiters from the
Process::big_lock. This is needed since after a exit/exec, there should
be no other threads waiting for this lock, the threads should be simply
killed. Only queued threads should wait for this lock at this point,
since blocked threads are handled in set_should_die.
2020-02-18 12:28:28 +00:00
|
|
|
|
2021-01-10 19:07:08 +00:00
|
|
|
int do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const Elf32_Ehdr& main_program_header);
|
2020-09-12 03:11:07 +00:00
|
|
|
ssize_t do_write(FileDescription&, const UserOrKernelBuffer&, size_t);
|
2018-10-16 09:01:38 +00:00
|
|
|
|
2021-01-10 19:07:08 +00:00
|
|
|
KResultOr<RefPtr<FileDescription>> find_elf_interpreter_for_executable(const String& path, const Elf32_Ehdr& elf_header, int nread, size_t file_size);
|
Kernel: Tighten up exec/do_exec and allow for PT_INTERP interpreters
This patch changes how exec() figures out which program image to
actually load. Previously, we opened the path to our main executable in
find_shebang_interpreter_for_executable, read the first page (or less,
if the file was smaller) and then decided whether to recurse with the
interpreter instead. We then re-opened the main executable in
do_exec.
However, since we now want to parse the ELF header and Program Headers
of an elf image before even doing any memory region work, we can change
the way this whole process works. We open the file and read (up to) the
first page in exec() itself, then pass just the page and the amount read
to find_shebang_interpreter_for_executable. Since we now have that page
and the FileDescription for the main executable handy, we can do a few
things. First, validate the ELF header and ELF program headers for any
shenanigans. ELF32 Little Endian i386 only, please. Second, we can grab
the PT_INTERP interpreter from any ET_DYN files, and open that guy right
away if it exists. Finally, we can pass the main executable's and
optionally the PT_INTERP interpreter's file descriptions down to do_exec
and not have to feel guilty about opening the file twice.
In do_exec, we now have a choice. Are we going to load the main
executable, or the interpreter? We could load both, but it'll be way
easier for the initial pass on the RTLD if we only load the interpreter.
Then it can load the main executable itself like any old shared object,
just, the one with main in it :). Later on we can load both of them
into memory and the RTLD can relocate itself before trying to do
anything. The way it's written now the RTLD will get dibs on its
requested virtual addresses being the actual virtual addresses.
2020-01-11 01:28:02 +00:00
|
|
|
|
2019-04-06 12:54:32 +00:00
|
|
|
int alloc_fd(int first_candidate_fd = 0);
|
2018-11-12 00:28:46 +00:00
|
|
|
|
2019-11-14 16:16:30 +00:00
|
|
|
KResult do_kill(Process&, int signal);
|
2020-08-08 23:08:24 +00:00
|
|
|
KResult do_killpg(ProcessGroupID pgrp, int signal);
|
2020-04-26 00:45:23 +00:00
|
|
|
KResult do_killall(int signal);
|
|
|
|
KResult do_killself(int signal);
|
2019-11-14 16:16:30 +00:00
|
|
|
|
2020-02-05 16:42:43 +00:00
|
|
|
KResultOr<siginfo_t> do_waitid(idtype_t idtype, int id, int options);
|
|
|
|
|
2020-01-11 11:47:47 +00:00
|
|
|
KResultOr<String> get_syscall_path_argument(const char* user_path, size_t path_length) const;
|
2020-07-31 14:28:37 +00:00
|
|
|
// Userspace<T> convenience wrapper: unwraps the typed userspace pointer and
// forwards to the raw-pointer overload, which performs the actual validation/copy.
KResultOr<String> get_syscall_path_argument(Userspace<const char*> user_path, size_t path_length) const
{
    return get_syscall_path_argument(user_path.unsafe_userspace_ptr(), path_length);
}
|
2020-01-11 11:47:47 +00:00
|
|
|
KResultOr<String> get_syscall_path_argument(const Syscall::StringArgument&) const;
|
2020-01-06 10:05:59 +00:00
|
|
|
|
2020-12-09 04:18:45 +00:00
|
|
|
bool has_tracee_thread(ProcessID tracer_pid);
|
2020-03-28 08:47:16 +00:00
|
|
|
|
2020-12-22 06:21:58 +00:00
|
|
|
void clear_futex_queues_on_exec();
|
|
|
|
|
2019-06-21 16:37:47 +00:00
|
|
|
RefPtr<PageDirectory> m_page_directory;
|
2018-11-01 08:01:51 +00:00
|
|
|
|
2018-11-01 12:15:46 +00:00
|
|
|
Process* m_prev { nullptr };
|
|
|
|
Process* m_next { nullptr };
|
2018-10-16 09:01:38 +00:00
|
|
|
|
|
|
|
String m_name;
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
ProcessID m_pid { 0 };
|
2020-08-08 20:04:20 +00:00
|
|
|
SessionID m_sid { 0 };
|
2020-08-15 19:13:19 +00:00
|
|
|
RefPtr<ProcessGroup> m_pg;
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2020-06-17 12:58:00 +00:00
|
|
|
uid_t m_euid { 0 };
|
|
|
|
gid_t m_egid { 0 };
|
|
|
|
uid_t m_uid { 0 };
|
|
|
|
gid_t m_gid { 0 };
|
|
|
|
uid_t m_suid { 0 };
|
|
|
|
gid_t m_sgid { 0 };
|
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
ThreadID m_exec_tid { 0 };
|
Kernel: Allow process with multiple threads to call exec and exit
This allows a process which has more than 1 thread to call exec, even
from a thread. This kills all the other threads, but it won't wait for
them to finish, just makes sure that they are not in a running/runnable
state.
In the case where a thread does exec, the new program PID will be the
thread TID, to keep the PID == TID in the new process.
This introduces a new function inside the Process class,
kill_threads_except_self which is called on exit() too (exit with
multiple threads wasn't properly working either).
Inside the Lock class, there is the need for a new function,
clear_waiters, which removes all the waiters from the
Process::big_lock. This is needed since after a exit/exec, there should
be no other threads waiting for this lock, the threads should be simply
killed. Only queued threads should wait for this lock at this point,
since blocked threads are handled in set_should_die.
2020-02-18 12:28:28 +00:00
|
|
|
|
2020-12-09 04:18:45 +00:00
|
|
|
OwnPtr<ThreadTracer> m_tracer;
|
|
|
|
|
2019-08-08 12:56:50 +00:00
|
|
|
static const int m_max_open_file_descriptors { FD_SETSIZE };
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2020-07-30 21:50:31 +00:00
|
|
|
class FileDescriptionAndFlags {
|
|
|
|
public:
|
|
|
|
operator bool() const { return !!m_description; }
|
|
|
|
|
|
|
|
FileDescription* description() { return m_description; }
|
|
|
|
const FileDescription* description() const { return m_description; }
|
|
|
|
|
|
|
|
u32 flags() const { return m_flags; }
|
|
|
|
void set_flags(u32 flags) { m_flags = flags; }
|
|
|
|
|
2019-04-29 02:55:54 +00:00
|
|
|
void clear();
|
2020-07-30 21:50:31 +00:00
|
|
|
void set(NonnullRefPtr<FileDescription>&&, u32 flags = 0);
|
|
|
|
|
|
|
|
private:
|
|
|
|
RefPtr<FileDescription> m_description;
|
|
|
|
u32 m_flags { 0 };
|
2018-11-13 00:36:31 +00:00
|
|
|
};
|
2019-06-07 07:36:51 +00:00
|
|
|
Vector<FileDescriptionAndFlags> m_fds;
|
2018-11-07 17:30:59 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
u8 m_termination_status { 0 };
|
|
|
|
u8 m_termination_signal { 0 };
|
2020-08-17 14:34:08 +00:00
|
|
|
Atomic<u32> m_thread_count { 0 };
|
2021-01-23 06:24:33 +00:00
|
|
|
mutable IntrusiveList<Thread, &Thread::m_process_thread_list_node> m_thread_list;
|
|
|
|
mutable RecursiveSpinLock m_thread_list_lock;
|
2018-10-18 11:05:00 +00:00
|
|
|
|
2020-09-10 15:46:24 +00:00
|
|
|
const bool m_is_kernel_process;
|
2019-08-08 12:56:50 +00:00
|
|
|
bool m_dead { false };
|
2019-12-11 19:36:56 +00:00
|
|
|
bool m_profiling { false };
|
2021-01-03 23:58:50 +00:00
|
|
|
Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_stopped { false };
|
2020-11-06 08:09:51 +00:00
|
|
|
bool m_should_dump_core { false };
|
2019-08-08 12:56:50 +00:00
|
|
|
|
2019-06-21 16:37:47 +00:00
|
|
|
RefPtr<Custody> m_executable;
|
|
|
|
RefPtr<Custody> m_cwd;
|
2020-01-10 22:14:04 +00:00
|
|
|
RefPtr<Custody> m_root_directory;
|
2020-01-12 18:42:01 +00:00
|
|
|
RefPtr<Custody> m_root_directory_relative_to_global_root;
|
2018-10-24 12:28:22 +00:00
|
|
|
|
2021-01-15 19:21:03 +00:00
|
|
|
Vector<String> m_arguments;
|
|
|
|
Vector<String> m_environment;
|
|
|
|
|
2019-11-06 15:52:54 +00:00
|
|
|
RefPtr<TTY> m_tty;
|
2018-10-30 12:59:29 +00:00
|
|
|
|
2020-07-30 21:52:28 +00:00
|
|
|
Region* find_region_from_range(const Range&);
|
|
|
|
Region* find_region_containing(const Range&);
|
2018-10-18 12:53:00 +00:00
|
|
|
|
2019-09-27 12:19:07 +00:00
|
|
|
NonnullOwnPtrVector<Region> m_regions;
|
2020-01-19 15:44:37 +00:00
|
|
|
struct RegionLookupCache {
|
2021-01-27 20:01:45 +00:00
|
|
|
Optional<Range> range;
|
2020-02-24 12:24:30 +00:00
|
|
|
WeakPtr<Region> region;
|
2020-01-19 15:44:37 +00:00
|
|
|
};
|
|
|
|
RegionLookupCache m_region_lookup_cache;
|
2018-10-18 11:05:00 +00:00
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
ProcessID m_ppid { 0 };
|
2018-11-06 12:40:23 +00:00
|
|
|
mode_t m_umask { 022 };
|
2018-10-26 09:16:56 +00:00
|
|
|
|
2020-12-25 17:27:42 +00:00
|
|
|
bool m_dumpable { true };
|
|
|
|
|
2020-09-07 09:53:54 +00:00
|
|
|
Vector<gid_t> m_extra_gids;
|
2018-11-07 20:19:47 +00:00
|
|
|
|
2020-02-28 13:04:49 +00:00
|
|
|
WeakPtr<Region> m_master_tls_region;
|
2019-09-07 13:50:44 +00:00
|
|
|
size_t m_master_tls_size { 0 };
|
|
|
|
size_t m_master_tls_alignment { 0 };
|
|
|
|
|
2019-05-02 01:28:20 +00:00
|
|
|
Lock m_big_lock { "Process" };
|
2020-08-01 20:41:54 +00:00
|
|
|
mutable SpinLock<u32> m_lock;
|
2019-06-07 09:30:07 +00:00
|
|
|
|
2020-12-01 22:44:52 +00:00
|
|
|
RefPtr<Timer> m_alarm_timer;
|
2019-07-29 05:26:01 +00:00
|
|
|
|
2021-01-25 21:42:36 +00:00
|
|
|
bool m_has_promises { false };
|
2020-01-11 19:48:43 +00:00
|
|
|
u32 m_promises { 0 };
|
2021-01-26 14:25:18 +00:00
|
|
|
bool m_has_execpromises { false };
|
2020-01-11 19:48:43 +00:00
|
|
|
u32 m_execpromises { 0 };
|
|
|
|
|
2020-01-21 18:28:29 +00:00
|
|
|
VeilState m_veil_state { VeilState::None };
|
2020-12-26 14:24:01 +00:00
|
|
|
UnveilNode m_unveiled_paths { "/", { .full_path = "/", .unveil_inherited_from_root = true } };
|
Kernel: Add a basic implementation of unveil()
This syscall is a complement to pledge() and adds the same sort of
incremental relinquishing of capabilities for filesystem access.
The first call to unveil() will "drop a veil" on the process, and from
now on, only unveiled parts of the filesystem are visible to it.
Each call to unveil() specifies a path to either a directory or a file
along with permissions for that path. The permissions are a combination
of the following:
- r: Read access (like the "rpath" promise)
- w: Write access (like the "wpath" promise)
- x: Execute access
- c: Create/remove access (like the "cpath" promise)
Attempts to open a path that has not been unveiled will fail with
ENOENT. If the unveiled path lacks sufficient permissions, it will fail
with EACCES.
Like pledge(), subsequent calls to unveil() with the same path can only
remove permissions, not add them.
Once you call unveil(nullptr, nullptr), the veil is locked, and it's no
longer possible to unveil any more paths for the process, ever.
This concept comes from OpenBSD, and their implementation does various
things differently, I'm sure. This is just a first implementation for
SerenityOS, and we'll keep improving on it as we go. :^)
2020-01-20 21:12:04 +00:00
|
|
|
|
2020-02-02 19:26:27 +00:00
|
|
|
OwnPtr<PerformanceEventBuffer> m_perf_event_buffer;
|
2020-02-17 12:29:49 +00:00
|
|
|
|
2020-12-22 06:21:58 +00:00
|
|
|
FutexQueues m_futex_queues;
|
|
|
|
SpinLock<u8> m_futex_lock;
|
|
|
|
|
2020-03-28 08:47:16 +00:00
|
|
|
// This member is used in the implementation of ptrace's PT_TRACEME flag.
|
|
|
|
// If it is set to true, the process will stop at the next execve syscall
|
|
|
|
// and wait for a tracer to attach.
|
|
|
|
bool m_wait_for_tracer_at_next_execve { false };
|
2020-11-29 23:05:27 +00:00
|
|
|
|
|
|
|
Thread::WaitBlockCondition m_wait_block_condition;
|
2020-12-30 14:19:57 +00:00
|
|
|
|
|
|
|
HashMap<String, String> m_coredump_metadata;
|
2021-01-15 19:29:13 +00:00
|
|
|
|
2021-01-28 07:41:18 +00:00
|
|
|
NonnullRefPtrVector<Thread> m_threads_for_coredump;
|
2018-10-16 09:01:38 +00:00
|
|
|
};
|
|
|
|
|
2018-11-07 21:15:02 +00:00
|
|
|
extern InlineLinkedList<Process>* g_processes;
|
2020-06-28 21:34:31 +00:00
|
|
|
extern RecursiveSpinLock g_processes_lock;
|
2018-11-08 15:09:05 +00:00
|
|
|
|
|
|
|
template<typename Callback>
|
|
|
|
inline void Process::for_each(Callback callback)
|
|
|
|
{
|
|
|
|
ASSERT_INTERRUPTS_DISABLED();
|
2020-06-28 21:34:31 +00:00
|
|
|
ScopedSpinLock lock(g_processes_lock);
|
2018-11-08 15:09:05 +00:00
|
|
|
for (auto* process = g_processes->head(); process;) {
|
|
|
|
auto* next_process = process->next();
|
2019-06-07 15:13:23 +00:00
|
|
|
if (callback(*process) == IterationDecision::Break)
|
2018-11-08 15:09:05 +00:00
|
|
|
break;
|
|
|
|
process = next_process;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-11 14:36:40 +00:00
|
|
|
template<typename Callback>
|
|
|
|
inline void Process::for_each_child(Callback callback)
|
|
|
|
{
|
|
|
|
ASSERT_INTERRUPTS_DISABLED();
|
2020-08-08 15:32:34 +00:00
|
|
|
ProcessID my_pid = pid();
|
2020-06-28 21:34:31 +00:00
|
|
|
ScopedSpinLock lock(g_processes_lock);
|
2018-11-11 14:36:40 +00:00
|
|
|
for (auto* process = g_processes->head(); process;) {
|
|
|
|
auto* next_process = process->next();
|
2020-08-08 23:08:24 +00:00
|
|
|
if (process->ppid() == my_pid || process->has_tracee_thread(m_pid)) {
|
2019-07-14 09:35:49 +00:00
|
|
|
if (callback(*process) == IterationDecision::Break)
|
2018-11-11 14:36:40 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
process = next_process;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-08 15:09:05 +00:00
|
|
|
template<typename Callback>
|
2021-01-23 06:24:33 +00:00
|
|
|
inline IterationDecision Process::for_each_thread(Callback callback) const
|
2018-11-08 15:09:05 +00:00
|
|
|
{
|
2021-01-28 05:29:17 +00:00
|
|
|
ScopedSpinLock thread_list_lock(m_thread_list_lock);
|
|
|
|
for (auto& thread : m_thread_list) {
|
|
|
|
IterationDecision decision = callback(thread);
|
|
|
|
if (decision != IterationDecision::Continue)
|
|
|
|
return decision;
|
2019-11-26 20:25:11 +00:00
|
|
|
}
|
2021-01-23 06:24:33 +00:00
|
|
|
return IterationDecision::Continue;
|
2018-11-08 15:09:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Callback>
|
2020-08-08 20:04:20 +00:00
|
|
|
inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
|
2018-11-08 15:09:05 +00:00
|
|
|
{
|
|
|
|
ASSERT_INTERRUPTS_DISABLED();
|
2020-06-28 21:34:31 +00:00
|
|
|
ScopedSpinLock lock(g_processes_lock);
|
2018-11-08 15:09:05 +00:00
|
|
|
for (auto* process = g_processes->head(); process;) {
|
|
|
|
auto* next_process = process->next();
|
2019-12-26 21:20:39 +00:00
|
|
|
if (!process->is_dead() && process->pgid() == pgid) {
|
2019-08-22 19:12:55 +00:00
|
|
|
if (callback(*process) == IterationDecision::Break)
|
2019-03-23 21:03:17 +00:00
|
|
|
break;
|
|
|
|
}
|
2018-11-08 15:09:05 +00:00
|
|
|
process = next_process;
|
|
|
|
}
|
|
|
|
}
|
2019-02-21 14:45:31 +00:00
|
|
|
|
2020-01-07 18:17:35 +00:00
|
|
|
inline bool InodeMetadata::may_read(const Process& process) const
|
2019-02-21 14:45:31 +00:00
|
|
|
{
|
2020-01-02 22:45:52 +00:00
|
|
|
return may_read(process.euid(), process.egid(), process.extra_gids());
|
2019-02-21 14:45:31 +00:00
|
|
|
}
|
|
|
|
|
2020-01-07 18:17:35 +00:00
|
|
|
inline bool InodeMetadata::may_write(const Process& process) const
|
2019-02-21 14:45:31 +00:00
|
|
|
{
|
2020-01-02 22:45:52 +00:00
|
|
|
return may_write(process.euid(), process.egid(), process.extra_gids());
|
2019-02-21 14:45:31 +00:00
|
|
|
}
|
|
|
|
|
2020-01-07 18:17:35 +00:00
|
|
|
inline bool InodeMetadata::may_execute(const Process& process) const
|
2019-02-21 14:45:31 +00:00
|
|
|
{
|
2020-01-02 22:45:52 +00:00
|
|
|
return may_execute(process.euid(), process.egid(), process.extra_gids());
|
2019-02-21 14:45:31 +00:00
|
|
|
}
|
2019-03-23 21:03:17 +00:00
|
|
|
|
2020-08-08 15:32:34 +00:00
|
|
|
// A thread's PID is the PID of its owning process.
inline ProcessID Thread::pid() const
{
    return m_process->pid();
}
|
2019-07-08 16:58:19 +00:00
|
|
|
|
|
|
|
// Logs a process as "name(pid)".
inline const LogStream& operator<<(const LogStream& stream, const Process& process)
{
    stream << process.name();
    stream << '(';
    stream << process.pid().value();
    stream << ')';
    return stream;
}
|
2019-12-30 19:10:00 +00:00
|
|
|
|
2020-06-28 21:34:31 +00:00
|
|
|
// Kills the calling process (SIGABRT) if it has made any pledge() promises.
// For syscalls that must be unreachable from a pledged process.
// (Comments must stay outside the macro: line splicing happens before
// comment removal, so a // comment would swallow the continuation backslash.)
#define REQUIRE_NO_PROMISES                            \
    do {                                               \
        if (Process::current()->has_promises()) {      \
            dbgln("Has made a promise");               \
            cli();                                     \
            Process::current()->crash(SIGABRT, 0);     \
            ASSERT_NOT_REACHED();                      \
        }                                              \
    } while (0)
|
|
|
|
|
2020-06-28 21:34:31 +00:00
|
|
|
// Kills the calling process (SIGABRT) if it has pledged but has not promised
// `promise`. Records the violated promise in the coredump metadata first so
// post-mortem tooling can report it.
#define REQUIRE_PROMISE(promise)                                         \
    do {                                                                 \
        if (Process::current()->has_promises()                           \
            && !Process::current()->has_promised(Pledge::promise)) {     \
            dbgln("Has not pledged {}", #promise);                       \
            cli();                                                       \
            Process::current()->coredump_metadata().set(                 \
                "pledge_violation", #promise);                           \
            Process::current()->crash(SIGABRT, 0);                       \
            ASSERT_NOT_REACHED();                                        \
        }                                                                \
    } while (0)
|
2020-02-16 00:27:42 +00:00
|
|
|
|
|
|
|
}
|
2020-09-12 03:11:07 +00:00
|
|
|
|
|
|
|
// Convenience overload: unpacks a syscall StringArgument into the
// (pointer, length) pair expected by the primary copy_string_from_user().
inline static String copy_string_from_user(const Kernel::Syscall::StringArgument& string)
{
    return copy_string_from_user(string.characters, string.length);
}
|
2021-01-08 23:11:15 +00:00
|
|
|
|
|
|
|
template<>
|
|
|
|
struct AK::Formatter<Kernel::Process> : AK::Formatter<String> {
|
|
|
|
void format(FormatBuilder& builder, const Kernel::Process& value)
|
|
|
|
{
|
|
|
|
return AK::Formatter<String>::format(builder, String::formatted("{}({})", value.name(), value.pid().value()));
|
|
|
|
}
|
|
|
|
};
|