/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
2019-04-03 13:13:07 +00:00
|
|
|
#pragma once
|
|
|
|
|
2019-08-08 08:53:24 +00:00
|
|
|
#include <AK/InlineLinkedList.h>
|
2019-09-16 08:19:44 +00:00
|
|
|
#include <AK/String.h>
|
2020-09-03 04:57:09 +00:00
|
|
|
#include <AK/WeakPtr.h>
|
2020-02-24 12:24:30 +00:00
|
|
|
#include <AK/Weakable.h>
|
2020-06-27 23:06:33 +00:00
|
|
|
#include <Kernel/Arch/i386/CPU.h>
|
2019-09-16 08:19:44 +00:00
|
|
|
#include <Kernel/Heap/SlabAllocator.h>
|
2020-09-05 21:52:14 +00:00
|
|
|
#include <Kernel/VM/PageFaultResponse.h>
|
|
|
|
#include <Kernel/VM/PurgeablePageRanges.h>
|
2019-05-17 02:32:08 +00:00
|
|
|
#include <Kernel/VM/RangeAllocator.h>
|
2020-04-05 19:58:44 +00:00
|
|
|
#include <Kernel/VM/VMObject.h>
|
2019-04-03 13:13:07 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2019-04-03 13:13:07 +00:00
|
|
|
class Inode;
|
|
|
|
class VMObject;
|
|
|
|
|
2020-02-24 12:24:30 +00:00
|
|
|
class Region final
|
|
|
|
: public InlineLinkedListNode<Region>
|
2020-09-03 04:57:09 +00:00
|
|
|
, public Weakable<Region>
|
|
|
|
, public PurgeablePageRanges {
|
2019-04-03 13:13:07 +00:00
|
|
|
friend class MemoryManager;
|
2019-06-07 09:43:58 +00:00
|
|
|
|
2019-09-16 08:19:44 +00:00
|
|
|
MAKE_SLAB_ALLOCATED(Region)
|
2019-04-03 13:13:07 +00:00
|
|
|
public:
|
2021-01-29 13:38:49 +00:00
|
|
|
enum Access : u8 {
|
2019-05-30 14:14:37 +00:00
|
|
|
Read = 1,
|
|
|
|
Write = 2,
|
|
|
|
Execute = 4,
|
2021-01-29 13:38:49 +00:00
|
|
|
HasBeenReadable = 16,
|
|
|
|
HasBeenWritable = 32,
|
|
|
|
HasBeenExecutable = 64,
|
2019-05-30 14:14:37 +00:00
|
|
|
};
|
|
|
|
|
2021-01-26 15:56:34 +00:00
|
|
|
static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared);
|
2020-01-09 21:29:31 +00:00
|
|
|
static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
|
2019-07-19 14:09:34 +00:00
|
|
|
|
2019-04-03 13:13:07 +00:00
|
|
|
~Region();
|
|
|
|
|
2019-08-29 18:55:40 +00:00
|
|
|
const Range& range() const { return m_range; }
|
2019-06-07 10:56:50 +00:00
|
|
|
VirtualAddress vaddr() const { return m_range.base(); }
|
2019-05-17 02:32:08 +00:00
|
|
|
size_t size() const { return m_range.size(); }
|
2019-05-30 14:14:37 +00:00
|
|
|
bool is_readable() const { return m_access & Access::Read; }
|
|
|
|
bool is_writable() const { return m_access & Access::Write; }
|
|
|
|
bool is_executable() const { return m_access & Access::Execute; }
|
2021-01-29 13:38:49 +00:00
|
|
|
|
|
|
|
bool has_been_readable() const { return m_access & Access::HasBeenReadable; }
|
|
|
|
bool has_been_writable() const { return m_access & Access::HasBeenWritable; }
|
|
|
|
bool has_been_executable() const { return m_access & Access::HasBeenExecutable; }
|
|
|
|
|
2020-01-09 21:29:31 +00:00
|
|
|
bool is_cacheable() const { return m_cacheable; }
|
2019-06-07 18:58:12 +00:00
|
|
|
const String& name() const { return m_name; }
|
2019-08-29 18:55:40 +00:00
|
|
|
unsigned access() const { return m_access; }
|
2019-04-03 13:13:07 +00:00
|
|
|
|
2019-06-07 18:58:12 +00:00
|
|
|
void set_name(const String& name) { m_name = name; }
|
2020-09-12 03:11:07 +00:00
|
|
|
void set_name(String&& name) { m_name = move(name); }
|
2019-04-03 13:13:07 +00:00
|
|
|
|
2019-09-04 09:27:14 +00:00
|
|
|
const VMObject& vmobject() const { return *m_vmobject; }
|
|
|
|
VMObject& vmobject() { return *m_vmobject; }
|
2020-09-03 04:57:09 +00:00
|
|
|
void set_vmobject(NonnullRefPtr<VMObject>&&);
|
2019-04-03 13:13:07 +00:00
|
|
|
|
|
|
|
bool is_shared() const { return m_shared; }
|
|
|
|
void set_shared(bool shared) { m_shared = shared; }
|
|
|
|
|
2019-11-17 11:11:43 +00:00
|
|
|
bool is_stack() const { return m_stack; }
|
|
|
|
void set_stack(bool stack) { m_stack = stack; }
|
|
|
|
|
2019-11-24 11:24:16 +00:00
|
|
|
bool is_mmap() const { return m_mmap; }
|
|
|
|
void set_mmap(bool mmap) { m_mmap = mmap; }
|
|
|
|
|
2019-07-19 14:09:34 +00:00
|
|
|
bool is_user_accessible() const { return m_user_accessible; }
|
2020-06-02 04:55:09 +00:00
|
|
|
bool is_kernel() const { return m_kernel || vaddr().get() >= 0xc0000000; }
|
|
|
|
|
2021-01-23 16:11:45 +00:00
|
|
|
PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);
|
2019-11-03 23:45:33 +00:00
|
|
|
|
2021-01-01 07:02:40 +00:00
|
|
|
OwnPtr<Region> clone(Process&);
|
2019-05-17 02:32:08 +00:00
|
|
|
|
2019-06-07 10:56:50 +00:00
|
|
|
bool contains(VirtualAddress vaddr) const
|
2019-04-03 13:13:07 +00:00
|
|
|
{
|
2019-06-07 10:56:50 +00:00
|
|
|
return m_range.contains(vaddr);
|
2019-04-03 13:13:07 +00:00
|
|
|
}
|
|
|
|
|
2019-08-29 18:55:40 +00:00
|
|
|
bool contains(const Range& range) const
|
|
|
|
{
|
|
|
|
return m_range.contains(range);
|
|
|
|
}
|
|
|
|
|
2019-06-07 10:56:50 +00:00
|
|
|
unsigned page_index_from_address(VirtualAddress vaddr) const
|
2019-04-03 13:13:07 +00:00
|
|
|
{
|
2019-06-07 10:56:50 +00:00
|
|
|
return (vaddr - m_range.base()).get() / PAGE_SIZE;
|
2019-04-03 13:13:07 +00:00
|
|
|
}
|
2020-09-18 07:49:51 +00:00
|
|
|
|
2020-07-06 18:47:08 +00:00
|
|
|
VirtualAddress vaddr_from_page_index(size_t page_index) const
|
|
|
|
{
|
|
|
|
return vaddr().offset(page_index * PAGE_SIZE);
|
|
|
|
}
|
2019-04-03 13:13:07 +00:00
|
|
|
|
2021-01-02 19:03:14 +00:00
|
|
|
bool translate_vmobject_page(size_t& index) const
|
|
|
|
{
|
|
|
|
auto first_index = first_page_index();
|
|
|
|
if (index < first_index) {
|
|
|
|
index = first_index;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
index -= first_index;
|
|
|
|
auto total_page_count = this->page_count();
|
|
|
|
if (index >= total_page_count) {
|
|
|
|
index = first_index + total_page_count - 1;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool translate_vmobject_page_range(size_t& index, size_t& page_count) const
|
|
|
|
{
|
|
|
|
auto first_index = first_page_index();
|
|
|
|
if (index < first_index) {
|
|
|
|
auto delta = first_index - index;
|
|
|
|
index = first_index;
|
|
|
|
if (delta >= page_count) {
|
|
|
|
page_count = 0;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
page_count -= delta;
|
|
|
|
}
|
|
|
|
index -= first_index;
|
|
|
|
auto total_page_count = this->page_count();
|
|
|
|
if (index + page_count > total_page_count) {
|
|
|
|
page_count = total_page_count - index;
|
|
|
|
if (page_count == 0)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
ALWAYS_INLINE size_t translate_to_vmobject_page(size_t page_index) const
|
|
|
|
{
|
|
|
|
return first_page_index() + page_index;
|
|
|
|
}
|
|
|
|
|
2019-04-03 13:13:07 +00:00
|
|
|
size_t first_page_index() const
|
|
|
|
{
|
2019-12-19 18:13:44 +00:00
|
|
|
return m_offset_in_vmobject / PAGE_SIZE;
|
2019-04-03 13:13:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t page_count() const
|
|
|
|
{
|
2019-05-17 02:32:08 +00:00
|
|
|
return size() / PAGE_SIZE;
|
2019-04-03 13:13:07 +00:00
|
|
|
}
|
|
|
|
|
2020-04-28 14:19:50 +00:00
|
|
|
const PhysicalPage* physical_page(size_t index) const
|
|
|
|
{
|
|
|
|
ASSERT(index < page_count());
|
|
|
|
return vmobject().physical_pages()[first_page_index() + index];
|
|
|
|
}
|
|
|
|
|
|
|
|
RefPtr<PhysicalPage>& physical_page_slot(size_t index)
|
|
|
|
{
|
|
|
|
ASSERT(index < page_count());
|
|
|
|
return vmobject().physical_pages()[first_page_index() + index];
|
|
|
|
}
|
|
|
|
|
2019-10-01 09:38:59 +00:00
|
|
|
size_t offset_in_vmobject() const
|
|
|
|
{
|
2019-12-19 18:13:44 +00:00
|
|
|
return m_offset_in_vmobject;
|
2019-10-01 09:38:59 +00:00
|
|
|
}
|
|
|
|
|
2020-12-22 06:21:58 +00:00
|
|
|
size_t offset_in_vmobject_from_vaddr(VirtualAddress vaddr) const
|
|
|
|
{
|
|
|
|
return m_offset_in_vmobject + vaddr.get() - this->vaddr().get();
|
|
|
|
}
|
|
|
|
|
2019-04-03 13:13:07 +00:00
|
|
|
size_t amount_resident() const;
|
|
|
|
size_t amount_shared() const;
|
2019-12-29 11:28:32 +00:00
|
|
|
size_t amount_dirty() const;
|
2019-04-03 13:13:07 +00:00
|
|
|
|
2019-10-01 17:58:41 +00:00
|
|
|
bool should_cow(size_t page_index) const;
|
|
|
|
void set_should_cow(size_t page_index, bool);
|
2019-04-03 13:13:07 +00:00
|
|
|
|
2020-09-05 21:52:14 +00:00
|
|
|
size_t cow_pages() const;
|
2019-12-15 15:53:00 +00:00
|
|
|
|
2019-12-25 01:39:03 +00:00
|
|
|
void set_readable(bool b) { set_access_bit(Access::Read, b); }
|
|
|
|
void set_writable(bool b) { set_access_bit(Access::Write, b); }
|
|
|
|
void set_executable(bool b) { set_access_bit(Access::Execute, b); }
|
2019-12-02 18:14:16 +00:00
|
|
|
|
2020-01-09 21:29:31 +00:00
|
|
|
void set_page_directory(PageDirectory&);
|
2020-09-01 22:10:54 +00:00
|
|
|
bool map(PageDirectory&);
|
2019-11-03 19:37:03 +00:00
|
|
|
enum class ShouldDeallocateVirtualMemoryRange {
|
|
|
|
No,
|
|
|
|
Yes,
|
|
|
|
};
|
|
|
|
void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
|
|
|
|
|
2019-11-03 19:59:54 +00:00
|
|
|
void remap();
|
2019-11-03 14:32:11 +00:00
|
|
|
|
2019-08-08 08:53:24 +00:00
|
|
|
// For InlineLinkedListNode
|
|
|
|
Region* m_next { nullptr };
|
|
|
|
Region* m_prev { nullptr };
|
|
|
|
|
2019-09-27 12:19:07 +00:00
|
|
|
// NOTE: These are public so we can make<> them.
|
2021-01-02 15:38:05 +00:00
|
|
|
Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable, bool kernel, bool shared);
|
2019-07-19 14:09:34 +00:00
|
|
|
|
2021-01-02 19:03:14 +00:00
|
|
|
bool remap_vmobject_page_range(size_t page_index, size_t page_count);
|
2020-09-03 04:57:09 +00:00
|
|
|
|
|
|
|
bool is_volatile(VirtualAddress vaddr, size_t size) const;
|
|
|
|
enum class SetVolatileError {
|
|
|
|
Success = 0,
|
|
|
|
NotPurgeable,
|
|
|
|
OutOfMemory
|
|
|
|
};
|
|
|
|
SetVolatileError set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged);
|
|
|
|
|
|
|
|
RefPtr<Process> get_owner();
|
|
|
|
|
2021-02-02 18:56:11 +00:00
|
|
|
bool is_syscall_region() const { return m_syscall_region; }
|
|
|
|
void set_syscall_region(bool b) { m_syscall_region = b; }
|
|
|
|
|
2019-09-27 12:19:07 +00:00
|
|
|
private:
|
2021-01-02 19:03:14 +00:00
|
|
|
bool do_remap_vmobject_page_range(size_t page_index, size_t page_count);
|
|
|
|
|
2019-12-25 01:39:03 +00:00
|
|
|
void set_access_bit(Access access, bool b)
|
|
|
|
{
|
|
|
|
if (b)
|
2021-01-29 13:38:49 +00:00
|
|
|
m_access |= access | (access << 4);
|
2019-12-25 01:39:03 +00:00
|
|
|
else
|
|
|
|
m_access &= ~access;
|
|
|
|
}
|
|
|
|
|
2021-01-02 19:03:14 +00:00
|
|
|
bool do_remap_vmobject_page(size_t index, bool with_flush = true);
|
|
|
|
bool remap_vmobject_page(size_t index, bool with_flush = true);
|
2020-07-06 18:47:08 +00:00
|
|
|
|
2019-11-03 23:45:33 +00:00
|
|
|
PageFaultResponse handle_cow_fault(size_t page_index);
|
2021-01-23 16:11:45 +00:00
|
|
|
PageFaultResponse handle_inode_fault(size_t page_index, ScopedSpinLock<RecursiveSpinLock>&);
|
2019-11-03 23:45:33 +00:00
|
|
|
PageFaultResponse handle_zero_fault(size_t page_index);
|
|
|
|
|
2020-09-01 22:10:54 +00:00
|
|
|
bool map_individual_page_impl(size_t page_index);
|
2020-01-01 18:30:38 +00:00
|
|
|
|
2020-09-03 04:57:09 +00:00
|
|
|
void register_purgeable_page_ranges();
|
|
|
|
void unregister_purgeable_page_ranges();
|
|
|
|
|
2019-06-21 16:37:47 +00:00
|
|
|
RefPtr<PageDirectory> m_page_directory;
|
2019-05-17 02:32:08 +00:00
|
|
|
Range m_range;
|
2019-12-19 18:13:44 +00:00
|
|
|
size_t m_offset_in_vmobject { 0 };
|
2019-09-04 09:27:14 +00:00
|
|
|
NonnullRefPtr<VMObject> m_vmobject;
|
2019-04-03 13:13:07 +00:00
|
|
|
String m_name;
|
2019-07-03 19:17:35 +00:00
|
|
|
u8 m_access { 0 };
|
2020-02-19 11:01:39 +00:00
|
|
|
bool m_shared : 1 { false };
|
|
|
|
bool m_user_accessible : 1 { false };
|
|
|
|
bool m_cacheable : 1 { false };
|
|
|
|
bool m_stack : 1 { false };
|
|
|
|
bool m_mmap : 1 { false };
|
2020-06-02 04:55:09 +00:00
|
|
|
bool m_kernel : 1 { false };
|
2021-02-02 18:56:11 +00:00
|
|
|
bool m_syscall_region : 1 { false };
|
2020-09-03 04:57:09 +00:00
|
|
|
WeakPtr<Process> m_owner;
|
2019-04-03 13:13:07 +00:00
|
|
|
};
|
2020-02-16 00:27:42 +00:00
|
|
|
|
2020-07-30 21:38:15 +00:00
|
|
|
inline unsigned prot_to_region_access_flags(int prot)
|
|
|
|
{
|
|
|
|
unsigned access = 0;
|
|
|
|
if (prot & PROT_READ)
|
|
|
|
access |= Region::Access::Read;
|
|
|
|
if (prot & PROT_WRITE)
|
|
|
|
access |= Region::Access::Write;
|
|
|
|
if (prot & PROT_EXEC)
|
|
|
|
access |= Region::Access::Execute;
|
|
|
|
return access;
|
|
|
|
}
|
|
|
|
|
2020-12-29 01:11:47 +00:00
|
|
|
inline int region_access_flags_to_prot(unsigned access)
|
|
|
|
{
|
|
|
|
int prot = 0;
|
|
|
|
if (access & Region::Access::Read)
|
|
|
|
prot |= PROT_READ;
|
|
|
|
if (access & Region::Access::Write)
|
|
|
|
prot |= PROT_WRITE;
|
|
|
|
if (access & Region::Access::Execute)
|
|
|
|
prot |= PROT_EXEC;
|
|
|
|
return prot;
|
|
|
|
}
|
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
}
|