
LibJS: Work-in-progress JIT compiler :^)

Andreas Kling 1 year ago
parent
commit
babdc0a25b

+ 10 - 0
Userland/Libraries/LibJS/Bytecode/BasicBlock.h

@@ -43,6 +43,16 @@ public:
 
     DeprecatedString const& name() const { return m_name; }
 
+    // ==============================================================
+    // FIXME: This is JIT state and shouldn't be part of BasicBlock itself.
+
+    // Offset into the instruction stream where this code block starts.
+    size_t offset { 0 };
+
+    // Offsets into the instruction stream where we have RIP-relative jump offsets to here that need patching.
+    Vector<size_t> jumps_to_here;
+    // ==============================================================
+
 private:
     explicit BasicBlock(DeprecatedString name);
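
These two fields drive the jump patching scheme introduced below: Assembler::jump() and jump_conditional() emit a placeholder 32-bit displacement (0xdeadbeef) and record its position in the target block's jumps_to_here, and once all blocks have been emitted, Compiler::compile() rewrites each recorded site with block->offset - jump - 4. The "- 4" accounts for RIP already pointing past the 4-byte displacement when the jump executes. For example, if a displacement starts at output offset 0x10 and the target block begins at 0x40, the patched value is 0x40 - 0x10 - 4 = 0x2c.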
 

+ 11 - 1
Userland/Libraries/LibJS/Bytecode/Interpreter.cpp

@@ -13,6 +13,7 @@
 #include <LibJS/Bytecode/Instruction.h>
 #include <LibJS/Bytecode/Interpreter.h>
 #include <LibJS/Bytecode/Op.h>
+#include <LibJS/JIT/Compiler.h>
 #include <LibJS/Runtime/AbstractOperations.h>
 #include <LibJS/Runtime/Array.h>
 #include <LibJS/Runtime/BigInt.h>
@@ -349,7 +350,16 @@ Interpreter::ValueAndFrame Interpreter::run_and_return_frame(Executable& executa
     else
         push_call_frame(make<CallFrame>(), executable.number_of_registers);
 
-    run_bytecode();
+    if (auto native_executable = JIT::Compiler::compile(executable)) {
+        native_executable->run(vm());
+
+        for (size_t i = 0; i < vm().running_execution_context().local_variables.size(); ++i) {
+            dbgln("%{}: {}", i, vm().running_execution_context().local_variables[i]);
+        }
+
+    } else {
+        run_bytecode();
+    }
 
     dbgln_if(JS_BYTECODE_DEBUG, "Bytecode::Interpreter did run unit {:p}", &executable);
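
With this change, run_and_return_frame() first hands the executable to JIT::Compiler::compile(). If compilation succeeds, the resulting NativeExecutable runs in place of the bytecode loop, and the local variables are dumped with dbgln() afterwards as a temporary debugging aid. If compile() returns null (an unsupported opcode was hit, or the LIBJS_NO_JIT environment variable is set; see Compiler.cpp below), execution falls back to run_bytecode() as before.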
 

+ 3 - 3
Userland/Libraries/LibJS/Bytecode/Interpreter.h

@@ -80,6 +80,9 @@ public:
 
     void visit_edges(Cell::Visitor&);
 
+    Span<Value> registers() { return m_current_call_frame; }
+    ReadonlySpan<Value> registers() const { return m_current_call_frame; }
+
 private:
     void run_bytecode();
 
@@ -93,9 +96,6 @@ private:
         return const_cast<Interpreter*>(this)->call_frame();
     }
 
-    Span<Value> registers() { return m_current_call_frame; }
-    ReadonlySpan<Value> registers() const { return m_current_call_frame; }
-
     void push_call_frame(Variant<NonnullOwnPtr<CallFrame>, CallFrame*>, size_t register_count);
     [[nodiscard]] Variant<NonnullOwnPtr<CallFrame>, CallFrame*> pop_call_frame();
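
The registers() accessors move from the private section to the public one so that NativeExecutable::run() (below) can hand the interpreter's register file to the generated code.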
 

+ 2 - 0
Userland/Libraries/LibJS/Bytecode/Op.h

@@ -115,6 +115,8 @@ private:
         ThrowCompletionOr<void> execute_impl(Bytecode::Interpreter&) const;            \
         DeprecatedString to_deprecated_string_impl(Bytecode::Executable const&) const; \
                                                                                        \
+        Register lhs() const { return m_lhs_reg; }                                     \
+                                                                                       \
     private:                                                                           \
         Register m_lhs_reg;                                                            \
     };
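
The new lhs() accessor is added to the macro that generates the common binary ops, exposing the left-hand-side register so the JIT can load it directly (see compile_less_than() in Compiler.cpp).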

+ 3 - 0
Userland/Libraries/LibJS/CMakeLists.txt

@@ -23,6 +23,9 @@ set(SOURCES
     Heap/Heap.cpp
     Heap/HeapBlock.cpp
     Heap/MarkedVector.cpp
+    JIT/Assembler.cpp
+    JIT/Compiler.cpp
+    JIT/NativeExecutable.cpp
     Lexer.cpp
     MarkupGenerator.cpp
     Module.cpp

+ 11 - 0
Userland/Libraries/LibJS/JIT/Assembler.cpp

@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <LibJS/JIT/Assembler.h>
+
+namespace JS::JIT {
+
+}

+ 267 - 0
Userland/Libraries/LibJS/JIT/Assembler.h

@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Vector.h>
+#include <LibJS/Bytecode/BasicBlock.h>
+
+namespace JS::JIT {
+
+struct Assembler {
+    Assembler(Vector<u8>& output)
+        : m_output(output)
+    {
+    }
+
+    Vector<u8>& m_output;
+
+    enum class Reg {
+        GPR0 = 0, // RAX
+        GPR1 = 1, // RCX
+
+        Arg0 = 7, // RDI
+        Arg1 = 6, // RSI
+        Arg2 = 2, // RDX
+
+        Ret = 0, // RAX
+
+        // FIXME: These are LibJS specific.
+        RegisterArrayBase = 8, // R8
+        LocalsArrayBase = 9,   // R9
+    };
+
+    struct Operand {
+        enum class Type {
+            Reg,
+            Imm64,
+            Mem64BaseAndOffset,
+        };
+
+        Type type {};
+
+        Reg reg {};
+        u64 offset_or_immediate { 0 };
+
+        static Operand Register(Reg reg)
+        {
+            Operand operand;
+            operand.type = Type::Reg;
+            operand.reg = reg;
+            return operand;
+        }
+
+        static Operand Imm64(u64 imm64)
+        {
+            Operand operand;
+            operand.type = Type::Imm64;
+            operand.offset_or_immediate = imm64;
+            return operand;
+        }
+
+        static Operand Mem64BaseAndOffset(Reg base, u64 offset)
+        {
+            Operand operand;
+            operand.type = Type::Mem64BaseAndOffset;
+            operand.reg = base;
+            operand.offset_or_immediate = offset;
+            return operand;
+        }
+    };
+
+    static constexpr u8 encode_reg(Reg reg)
+    {
+        return to_underlying(reg) & 0x7;
+    }
+
+    void mov(Operand dst, Operand src)
+    {
+        if (dst.type == Operand::Type::Reg && src.type == Operand::Type::Reg) {
+            if (src.reg == dst.reg)
+                return;
+            emit8(0x48
+                | ((to_underlying(src.reg) >= 8) ? 1 << 2 : 0)
+                | ((to_underlying(dst.reg) >= 8) ? 1 << 0 : 0));
+            emit8(0x89);
+            emit8(0xc0 | (encode_reg(src.reg) << 3) | encode_reg(dst.reg));
+            return;
+        }
+
+        if (dst.type == Operand::Type::Reg && src.type == Operand::Type::Imm64) {
+            emit8(0x48 | ((to_underlying(dst.reg) >= 8) ? 1 << 0 : 0));
+            emit8(0xb8 | encode_reg(dst.reg));
+            emit64(src.offset_or_immediate);
+            return;
+        }
+
+        if (dst.type == Operand::Type::Mem64BaseAndOffset && src.type == Operand::Type::Reg) {
+            emit8(0x48
+                | ((to_underlying(src.reg) >= 8) ? 1 << 2 : 0)
+                | ((to_underlying(dst.reg) >= 8) ? 1 << 0 : 0));
+            emit8(0x89);
+            emit8(0x80 | (encode_reg(src.reg) << 3) | encode_reg(dst.reg));
+            emit32(dst.offset_or_immediate);
+            return;
+        }
+
+        if (dst.type == Operand::Type::Reg && src.type == Operand::Type::Mem64BaseAndOffset) {
+            emit8(0x48
+                | ((to_underlying(dst.reg) >= 8) ? 1 << 2 : 0)
+                | ((to_underlying(src.reg) >= 8) ? 1 << 0 : 0));
+            emit8(0x8b);
+            emit8(0x80 | (encode_reg(dst.reg) << 3) | encode_reg(src.reg));
+            emit32(src.offset_or_immediate);
+            return;
+        }
+
+        VERIFY_NOT_REACHED();
+    }
+
+    void emit8(u8 value)
+    {
+        m_output.append(value);
+    }
+
+    void emit32(u32 value)
+    {
+        m_output.append((value >> 0) & 0xff);
+        m_output.append((value >> 8) & 0xff);
+        m_output.append((value >> 16) & 0xff);
+        m_output.append((value >> 24) & 0xff);
+    }
+
+    void emit64(u64 value)
+    {
+        m_output.append((value >> 0) & 0xff);
+        m_output.append((value >> 8) & 0xff);
+        m_output.append((value >> 16) & 0xff);
+        m_output.append((value >> 24) & 0xff);
+        m_output.append((value >> 32) & 0xff);
+        m_output.append((value >> 40) & 0xff);
+        m_output.append((value >> 48) & 0xff);
+        m_output.append((value >> 56) & 0xff);
+    }
+
+    void load_immediate64(Reg dst, u64 imm)
+    {
+        mov(Operand::Register(dst), Operand::Imm64(imm));
+    }
+
+    void increment(Reg dst)
+    {
+        emit8(0x48);
+        emit8(0xff);
+        emit8(0xc0 | to_underlying(dst));
+    }
+
+    void less_than(Reg dst, Reg src)
+    {
+        // cmp src, dst
+        emit8(0x48);
+        emit8(0x39);
+        emit8(0xc0 | (to_underlying(src) << 3) | to_underlying(dst));
+
+        // setl dst
+        emit8(0x0f);
+        emit8(0x9c);
+        emit8(0xc0 | to_underlying(dst));
+
+        // movzx dst, dst
+        emit8(0x48);
+        emit8(0x0f);
+        emit8(0xb6);
+        emit8(0xc0 | (to_underlying(dst) << 3) | to_underlying(dst));
+    }
+
+    void jump(Bytecode::BasicBlock& target)
+    {
+        // jmp target (RIP-relative 32-bit offset)
+        emit8(0xe9);
+        target.jumps_to_here.append(m_output.size());
+        emit32(0xdeadbeef);
+    }
+
+    void jump_conditional(Reg reg, Bytecode::BasicBlock& true_target, Bytecode::BasicBlock& false_target)
+    {
+        // if (reg & 1) is 0, jump to false_target, else jump to true_target
+        // test reg, 1
+        emit8(0x48 | ((to_underlying(reg) >= 8) ? 1 << 2 : 0));
+        emit8(0xf7);
+        emit8(0xc0 | encode_reg(reg));
+        emit32(0x01);
+
+        // jz false_target (RIP-relative 32-bit offset)
+        emit8(0x0f);
+        emit8(0x84);
+        false_target.jumps_to_here.append(m_output.size());
+        emit32(0xdeadbeef);
+
+        // jmp true_target (RIP-relative 32-bit offset)
+        jump(true_target);
+    }
+
+    void exit()
+    {
+        // ret
+        emit8(0xc3);
+    }
+
+    void native_call(void* callee)
+    {
+        // push caller-saved registers on the stack
+        // (callee-saved registers: RBX, RSP, RBP, and R12–R15)
+
+        // push RCX, RDX, RSI, RDI, R8, R9, R10, R11
+        emit8(0x51);
+        emit8(0x52);
+        emit8(0x56);
+        emit8(0x57);
+        emit8(0x41);
+        emit8(0x50);
+        emit8(0x41);
+        emit8(0x51);
+        emit8(0x41);
+        emit8(0x52);
+        emit8(0x41);
+        emit8(0x53);
+
+        // align the stack to 16-byte boundary
+        emit8(0x48);
+        emit8(0x83);
+        emit8(0xec);
+        emit8(0x08);
+
+        // load callee into RAX and make indirect call
+        emit8(0x48);
+        emit8(0xb8);
+        emit64((u64)callee);
+        emit8(0xff);
+        emit8(0xd0);
+
+        // adjust stack pointer
+        emit8(0x48);
+        emit8(0x83);
+        emit8(0xc4);
+        emit8(0x08);
+
+        // restore caller-saved registers from the stack
+        // pop R11, R10, R9, R8, RDI, RSI, RDX, RCX
+        emit8(0x41);
+        emit8(0x5b);
+        emit8(0x41);
+        emit8(0x5a);
+        emit8(0x41);
+        emit8(0x59);
+        emit8(0x41);
+        emit8(0x58);
+        emit8(0x5f);
+        emit8(0x5e);
+        emit8(0x5a);
+        emit8(0x59);
+    }
+};
+
+}
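
As a quick orientation: mov() covers four operand combinations (reg to reg, imm64 to reg, and reg to/from [base + disp32]) by emitting a REX.W prefix, the opcode, and a ModR/M byte, while jump() and jump_conditional() emit placeholder displacements that get patched later. The following is a minimal, hypothetical harness (not part of this commit) sketching how the assembler could be exercised on its own, assuming an x86-64 POSIX host; it reuses the same mmap/memcpy/mprotect sequence that Compiler::compile() uses below.

    #include <AK/Types.h>
    #include <AK/Vector.h>
    #include <LibJS/JIT/Assembler.h>
    #include <string.h>
    #include <sys/mman.h>

    int main()
    {
        Vector<u8> code;
        JS::JIT::Assembler assembler { code };

        // movabs rax, 42 ; ret  -- emitted as 48 B8 2A 00 00 00 00 00 00 00 C3
        assembler.mov(
            JS::JIT::Assembler::Operand::Register(JS::JIT::Assembler::Reg::Ret),
            JS::JIT::Assembler::Operand::Imm64(42));
        assembler.exit();

        // Map writable memory, copy the code in, then flip it to read+execute.
        auto* memory = mmap(nullptr, code.size(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
        if (memory == MAP_FAILED)
            return 1;
        memcpy(memory, code.data(), code.size());
        mprotect(memory, code.size(), PROT_READ | PROT_EXEC);

        auto entry = (u64 (*)())memory;
        return entry() == 42 ? 0 : 1;
    }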

+ 221 - 0
Userland/Libraries/LibJS/JIT/Compiler.cpp

@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/OwnPtr.h>
+#include <LibJS/Bytecode/Instruction.h>
+#include <LibJS/JIT/Compiler.h>
+#include <LibJS/Runtime/ValueInlines.h>
+#include <sys/mman.h>
+
+namespace JS::JIT {
+
+void Compiler::store_vm_register(Bytecode::Register dst, Assembler::Reg src)
+{
+    m_assembler.mov(
+        Assembler::Operand::Mem64BaseAndOffset(Assembler::Reg::RegisterArrayBase, dst.index() * sizeof(Value)),
+        Assembler::Operand::Register(src));
+}
+
+void Compiler::load_vm_register(Assembler::Reg dst, Bytecode::Register src)
+{
+    m_assembler.mov(
+        Assembler::Operand::Register(dst),
+        Assembler::Operand::Mem64BaseAndOffset(Assembler::Reg::RegisterArrayBase, src.index() * sizeof(Value)));
+}
+
+void Compiler::store_vm_local(size_t dst, Assembler::Reg src)
+{
+    m_assembler.mov(
+        Assembler::Operand::Mem64BaseAndOffset(Assembler::Reg::LocalsArrayBase, dst * sizeof(Value)),
+        Assembler::Operand::Register(src));
+}
+
+void Compiler::load_vm_local(Assembler::Reg dst, size_t src)
+{
+    m_assembler.mov(
+        Assembler::Operand::Register(dst),
+        Assembler::Operand::Mem64BaseAndOffset(Assembler::Reg::LocalsArrayBase, src * sizeof(Value)));
+}
+
+void Compiler::compile_load_immediate(Bytecode::Op::LoadImmediate const& op)
+{
+    m_assembler.mov(
+        Assembler::Operand::Register(Assembler::Reg::GPR0),
+        Assembler::Operand::Imm64(op.value().encoded()));
+    store_vm_register(Bytecode::Register::accumulator(), Assembler::Reg::GPR0);
+}
+
+void Compiler::compile_load(Bytecode::Op::Load const& op)
+{
+    load_vm_register(Assembler::Reg::GPR0, op.src());
+    store_vm_register(Bytecode::Register::accumulator(), Assembler::Reg::GPR0);
+}
+
+void Compiler::compile_store(Bytecode::Op::Store const& op)
+{
+    load_vm_register(Assembler::Reg::GPR0, Bytecode::Register::accumulator());
+    store_vm_register(op.dst(), Assembler::Reg::GPR0);
+}
+
+void Compiler::compile_get_local(Bytecode::Op::GetLocal const& op)
+{
+    load_vm_local(Assembler::Reg::GPR0, op.index());
+    store_vm_register(Bytecode::Register::accumulator(), Assembler::Reg::GPR0);
+}
+
+void Compiler::compile_set_local(Bytecode::Op::SetLocal const& op)
+{
+    load_vm_register(Assembler::Reg::GPR0, Bytecode::Register::accumulator());
+    store_vm_local(op.index(), Assembler::Reg::GPR0);
+}
+
+void Compiler::compile_jump(Bytecode::Op::Jump const& op)
+{
+    m_assembler.jump(const_cast<Bytecode::BasicBlock&>(op.true_target()->block()));
+}
+
+static bool cxx_to_boolean(VM&, Value value)
+{
+    return value.to_boolean();
+}
+
+void Compiler::compile_to_boolean(Assembler::Reg reg)
+{
+    m_assembler.mov(
+        Assembler::Operand::Register(Assembler::Reg::Arg1),
+        Assembler::Operand::Register(reg));
+    m_assembler.native_call((void*)cxx_to_boolean);
+    m_assembler.mov(
+        Assembler::Operand::Register(reg),
+        Assembler::Operand::Register(Assembler::Reg::Ret));
+}
+
+void Compiler::compile_jump_conditional(Bytecode::Op::JumpConditional const& op)
+{
+    load_vm_register(Assembler::Reg::GPR0, Bytecode::Register::accumulator());
+
+    compile_to_boolean(Assembler::Reg::GPR0);
+
+    m_assembler.jump_conditional(Assembler::Reg::GPR0,
+        const_cast<Bytecode::BasicBlock&>(op.true_target()->block()),
+        const_cast<Bytecode::BasicBlock&>(op.false_target()->block()));
+}
+
+[[maybe_unused]] static Value cxx_less_than(VM& vm, Value lhs, Value rhs)
+{
+    // FIXME: Handle exceptions!
+    return MUST(less_than(vm, lhs, rhs));
+}
+
+void Compiler::compile_less_than(Bytecode::Op::LessThan const& op)
+{
+    load_vm_register(Assembler::Reg::Arg1, op.lhs());
+    load_vm_register(Assembler::Reg::Arg2, Bytecode::Register::accumulator());
+    m_assembler.native_call((void*)cxx_less_than);
+    store_vm_register(Bytecode::Register::accumulator(), Assembler::Reg::Ret);
+}
+
+[[maybe_unused]] static Value cxx_increment(VM& vm, Value value)
+{
+    // FIXME: Handle exceptions!
+    auto old_value = MUST(value.to_numeric(vm));
+    if (old_value.is_number())
+        return Value(old_value.as_double() + 1);
+    return BigInt::create(vm, old_value.as_bigint().big_integer().plus(Crypto::SignedBigInteger { 1 }));
+}
+
+void Compiler::compile_increment(Bytecode::Op::Increment const&)
+{
+    load_vm_register(Assembler::Reg::Arg1, Bytecode::Register::accumulator());
+    m_assembler.native_call((void*)cxx_increment);
+    store_vm_register(Bytecode::Register::accumulator(), Assembler::Reg::Ret);
+}
+
+OwnPtr<NativeExecutable> Compiler::compile(Bytecode::Executable const& bytecode_executable)
+{
+    if (getenv("LIBJS_NO_JIT"))
+        return nullptr;
+
+    Compiler compiler;
+
+    compiler.m_assembler.mov(
+        Assembler::Operand::Register(Assembler::Reg::RegisterArrayBase),
+        Assembler::Operand::Register(Assembler::Reg::Arg1));
+
+    compiler.m_assembler.mov(
+        Assembler::Operand::Register(Assembler::Reg::LocalsArrayBase),
+        Assembler::Operand::Register(Assembler::Reg::Arg2));
+
+    for (auto& block : bytecode_executable.basic_blocks) {
+        block->offset = compiler.m_output.size();
+        auto it = Bytecode::InstructionStreamIterator(block->instruction_stream());
+        while (!it.at_end()) {
+            auto const& op = *it;
+            switch (op.type()) {
+            case Bytecode::Instruction::Type::LoadImmediate:
+                compiler.compile_load_immediate(static_cast<Bytecode::Op::LoadImmediate const&>(op));
+                break;
+            case Bytecode::Instruction::Type::Store:
+                compiler.compile_store(static_cast<Bytecode::Op::Store const&>(op));
+                break;
+            case Bytecode::Instruction::Type::Load:
+                compiler.compile_load(static_cast<Bytecode::Op::Load const&>(op));
+                break;
+            case Bytecode::Instruction::Type::GetLocal:
+                compiler.compile_get_local(static_cast<Bytecode::Op::GetLocal const&>(op));
+                break;
+            case Bytecode::Instruction::Type::SetLocal:
+                compiler.compile_set_local(static_cast<Bytecode::Op::SetLocal const&>(op));
+                break;
+            case Bytecode::Instruction::Type::Jump:
+                compiler.compile_jump(static_cast<Bytecode::Op::Jump const&>(op));
+                break;
+            case Bytecode::Instruction::Type::JumpConditional:
+                compiler.compile_jump_conditional(static_cast<Bytecode::Op::JumpConditional const&>(op));
+                break;
+            case Bytecode::Instruction::Type::LessThan:
+                compiler.compile_less_than(static_cast<Bytecode::Op::LessThan const&>(op));
+                break;
+            case Bytecode::Instruction::Type::Increment:
+                compiler.compile_increment(static_cast<Bytecode::Op::Increment const&>(op));
+                break;
+            default:
+                dbgln("JIT compilation failed: {}", bytecode_executable.name);
+                dbgln("Unsupported bytecode op: {}", op.to_deprecated_string(bytecode_executable));
+                return nullptr;
+            }
+
+            ++it;
+        }
+        if (!block->is_terminated())
+            compiler.m_assembler.exit();
+    }
+
+    // Patch up all the jumps
+    for (auto& block : bytecode_executable.basic_blocks) {
+        for (auto& jump : block->jumps_to_here) {
+            auto offset = block->offset - jump - 4;
+            compiler.m_output[jump + 0] = (offset >> 0) & 0xff;
+            compiler.m_output[jump + 1] = (offset >> 8) & 0xff;
+            compiler.m_output[jump + 2] = (offset >> 16) & 0xff;
+            compiler.m_output[jump + 3] = (offset >> 24) & 0xff;
+        }
+    }
+
+    write(STDOUT_FILENO, compiler.m_output.data(), compiler.m_output.size());
+
+    auto* executable_memory = mmap(nullptr, compiler.m_output.size(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+    if (executable_memory == MAP_FAILED) {
+        perror("mmap");
+        return nullptr;
+    }
+
+    memcpy(executable_memory, compiler.m_output.data(), compiler.m_output.size());
+    mprotect(executable_memory, compiler.m_output.size(), PROT_READ | PROT_EXEC);
+    return make<NativeExecutable>(executable_memory, compiler.m_output.size());
+}
+
+}
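
Two small notes on compile(): the generated code is first copied into a read+write anonymous mapping and only then flipped to read+execute with mprotect(), and the raw machine code is also written to stdout as a debugging aid (handy to pipe into a disassembler). The pattern for a binary op is visible in compile_less_than(): load the lhs register and the accumulator into Arg1/Arg2, call a cxx_* helper through native_call(), and store the returned Value back into the accumulator. A hypothetical follow-up (not in this commit) for the Add op could look like this, assuming LibJS's add(VM&, Value, Value) helper has the same shape as less_than():

    // In Compiler.cpp -- mirrors cxx_less_than()/compile_less_than() above.
    [[maybe_unused]] static Value cxx_add(VM& vm, Value lhs, Value rhs)
    {
        // FIXME: Handle exceptions!
        return MUST(add(vm, lhs, rhs));
    }

    void Compiler::compile_add(Bytecode::Op::Add const& op)
    {
        load_vm_register(Assembler::Reg::Arg1, op.lhs());
        load_vm_register(Assembler::Reg::Arg2, Bytecode::Register::accumulator());
        m_assembler.native_call((void*)cxx_add);
        store_vm_register(Bytecode::Register::accumulator(), Assembler::Reg::Ret);
    }

    // ...plus a declaration in Compiler.h and a case in compile()'s switch:
    //     case Bytecode::Instruction::Type::Add:
    //         compiler.compile_add(static_cast<Bytecode::Op::Add const&>(op));
    //         break;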

+ 43 - 0
Userland/Libraries/LibJS/JIT/Compiler.h

@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibJS/Bytecode/Executable.h>
+#include <LibJS/Bytecode/Op.h>
+#include <LibJS/JIT/Assembler.h>
+#include <LibJS/JIT/NativeExecutable.h>
+
+namespace JS::JIT {
+
+class Compiler {
+public:
+    static OwnPtr<NativeExecutable> compile(Bytecode::Executable const&);
+
+private:
+    void compile_load_immediate(Bytecode::Op::LoadImmediate const&);
+    void compile_load(Bytecode::Op::Load const&);
+    void compile_store(Bytecode::Op::Store const&);
+    void compile_get_local(Bytecode::Op::GetLocal const&);
+    void compile_set_local(Bytecode::Op::SetLocal const&);
+    void compile_jump(Bytecode::Op::Jump const&);
+    void compile_jump_conditional(Bytecode::Op::JumpConditional const&);
+    void compile_less_than(Bytecode::Op::LessThan const&);
+    void compile_increment(Bytecode::Op::Increment const&);
+
+    void store_vm_register(Bytecode::Register, Assembler::Reg);
+    void load_vm_register(Assembler::Reg, Bytecode::Register);
+
+    void store_vm_local(size_t, Assembler::Reg);
+    void load_vm_local(Assembler::Reg, size_t);
+
+    void compile_to_boolean(Assembler::Reg);
+
+    Vector<u8> m_output;
+    Assembler m_assembler { m_output };
+};
+
+}

+ 33 - 0
Userland/Libraries/LibJS/JIT/NativeExecutable.cpp

@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <LibJS/Bytecode/Interpreter.h>
+#include <LibJS/JIT/NativeExecutable.h>
+#include <LibJS/Runtime/VM.h>
+#include <sys/mman.h>
+
+namespace JS::JIT {
+
+NativeExecutable::NativeExecutable(void* code, size_t size)
+    : m_code(code)
+    , m_size(size)
+{
+}
+
+NativeExecutable::~NativeExecutable()
+{
+    munmap(m_code, m_size);
+}
+
+void NativeExecutable::run(VM& vm)
+{
+    typedef void (*JITCode)(VM&, Value* registers, Value* locals);
+    ((JITCode)m_code)(vm,
+        vm.bytecode_interpreter().registers().data(),
+        vm.running_execution_context().local_variables.data());
+}
+
+}
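
run() casts the mapped code to a function taking (VM&, Value* registers, Value* locals). Under the System V AMD64 calling convention those arrive in RDI, RSI and RDX, which the assembler names Arg0, Arg1 and Arg2:

    Arg0 / RDI -> VM&              (left untouched, so cxx_* helpers receive it as their first argument)
    Arg1 / RSI -> Value* registers (copied to R8, RegisterArrayBase, by the prologue in Compiler::compile())
    Arg2 / RDX -> Value* locals    (copied to R9, LocalsArrayBase)

Copying the two pointers into R8/R9 keeps Arg1 and Arg2 free for setting up arguments to native_call() targets.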

+ 30 - 0
Userland/Libraries/LibJS/JIT/NativeExecutable.h

@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2023, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Noncopyable.h>
+#include <AK/Types.h>
+#include <LibJS/Runtime/Completion.h>
+
+namespace JS::JIT {
+
+class NativeExecutable {
+    AK_MAKE_NONCOPYABLE(NativeExecutable);
+    AK_MAKE_NONMOVABLE(NativeExecutable);
+
+public:
+    NativeExecutable(void* code, size_t size);
+    ~NativeExecutable();
+
+    void run(VM&);
+
+private:
+    void* m_code { nullptr };
+    size_t m_size { 0 };
+};
+
+}